@@ -654,7 +654,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
- kfree(domain);
+ kfree(to_smmu_domain(domain));
}
static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
@@ -662,17 +662,20 @@ static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
.free = arm_smmu_sva_domain_free
};
-struct iommu_domain *arm_smmu_sva_domain_alloc(unsigned type)
+struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_SVA)
- return ERR_PTR(-EOPNOTSUPP);
-
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
+
+ smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+ smmu_domain->smmu = smmu;
return &smmu_domain->domain;
}
@@ -3301,8 +3301,8 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc = arm_smmu_sva_domain_alloc,
.domain_alloc_paging = arm_smmu_domain_alloc_paging,
+ .domain_alloc_sva = arm_smmu_sva_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
@@ -793,7 +793,8 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
-struct iommu_domain *arm_smmu_sva_domain_alloc(unsigned int type);
+struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm);
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id);
#else /* CONFIG_ARM_SMMU_V3_SVA */
@@ -839,5 +840,8 @@ static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
ioasid_t id)
{
}
+
+#define arm_smmu_sva_domain_alloc NULL
+
#endif /* CONFIG_ARM_SMMU_V3_SVA */
#endif /* _ARM_SMMU_V3_H */
@@ -99,8 +99,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
/* Allocate a new domain and set it on device pasid. */
domain = iommu_sva_domain_alloc(dev, mm);
- if (!domain) {
- ret = -ENOMEM;
+ if (IS_ERR(domain)) {
+ ret = PTR_ERR(domain);
goto out_free_handle;
}
@@ -3621,9 +3621,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return NULL;
+ if (ops->domain_alloc_sva) {
+ domain = ops->domain_alloc_sva(dev, mm);
+ if (IS_ERR(domain))
+ return domain;
+ } else {
+ domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+ }
domain->type = IOMMU_DOMAIN_SVA;
mmgrab(mm);
@@ -419,6 +419,7 @@ static inline int __iommu_copy_struct_from_user_array(
* Upon failure, ERR_PTR must be returned.
* @domain_alloc_paging: Allocate an iommu_domain that can be used for
* UNMANAGED, DMA, and DMA_FQ domain types.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -459,6 +460,8 @@ struct iommu_ops {
struct device *dev, u32 flags, struct iommu_domain *parent,
const struct iommu_user_data *user_data);
struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+ struct mm_struct *mm);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
Make a new op that receives the device and the mm_struct that the SVA
domain should be created for. Unlike domain_alloc_paging() the dev
argument is never NULL here.

This allows drivers to fully initialize the SVA domain and allocate the
mmu_notifier during allocation. It allows the notifier lifetime to
follow the lifetime of the iommu_domain.

Since we have only one call site, upgrade the new op to return ERR_PTR
instead of NULL.

Change SMMUv3 to use the new op.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 13 ++++++++-----
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c     |  2 +-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h     |  6 +++++-
 drivers/iommu/iommu-sva.c                       |  4 ++--
 drivers/iommu/iommu.c                           | 12 +++++++++---
 include/linux/iommu.h                           |  3 +++
 6 files changed, 28 insertions(+), 12 deletions(-)
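
For anyone converting another driver, the sketch below shows the shape the
new op takes. It is a minimal, hypothetical example: the my_* identifiers
and struct my_domain are made up for illustration, and only the
iommu_ops/iommu_domain plumbing and the ERR_PTR convention come from this
patch.

	/* Sketch only: my_* names are hypothetical, not part of the patch. */
	#include <linux/container_of.h>
	#include <linux/err.h>
	#include <linux/iommu.h>
	#include <linux/slab.h>

	struct my_domain {
		struct iommu_domain domain;
		/* driver-private SVA state (e.g. an mmu_notifier) would live here */
	};

	static void my_sva_domain_free(struct iommu_domain *domain)
	{
		kfree(container_of(domain, struct my_domain, domain));
	}

	static const struct iommu_domain_ops my_sva_domain_ops = {
		/* a real SVA driver would also provide .set_dev_pasid */
		.free = my_sva_domain_free,
	};

	static struct iommu_domain *my_domain_alloc_sva(struct device *dev,
							struct mm_struct *mm)
	{
		struct my_domain *d;

		/*
		 * Unlike domain_alloc_paging(), dev is never NULL here;
		 * dev_iommu_priv_get(dev) can fetch per-device driver data,
		 * as the SMMUv3 conversion above does.
		 */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return ERR_PTR(-ENOMEM);	/* the new op returns ERR_PTR, not NULL */

		d->domain.type = IOMMU_DOMAIN_SVA;
		d->domain.ops = &my_sva_domain_ops;
		/*
		 * mm is available at allocation time, so the driver could
		 * register its mmu_notifier here and tear it down in ->free(),
		 * tying the notifier lifetime to the iommu_domain lifetime.
		 */
		return &d->domain;
	}

	static const struct iommu_ops my_iommu_ops = {
		/* ... .domain_alloc_paging, .probe_device, etc. ... */
		.domain_alloc_sva	= my_domain_alloc_sva,
	};

Unconverted drivers keep working: the fallback branch added to
iommu_sva_domain_alloc() stays on the old domain_alloc(IOMMU_DOMAIN_SVA)
path when ops->domain_alloc_sva is NULL, which is also why the
!CONFIG_ARM_SMMU_V3_SVA stub defines arm_smmu_sva_domain_alloc as NULL so
the arm_smmu_ops initializer still compiles.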