@@ -2197,14 +2197,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
-	struct arm_smmu_domain *smmu_domain;
 
 	if (type == IOMMU_DOMAIN_SVA)
 		return arm_smmu_sva_domain_alloc();
+	return ERR_PTR(-EOPNOTSUPP);
+}
 
-	if (type != IOMMU_DOMAIN_UNMANAGED &&
-	    type != IOMMU_DOMAIN_DMA)
-		return NULL;
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+	struct arm_smmu_domain *smmu_domain;
 
 	/*
 	 * Allocate the domain and initialise some of its data structures.
@@ -2213,13 +2214,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	 */
 	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
 	if (!smmu_domain)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	mutex_init(&smmu_domain->init_mutex);
 	INIT_LIST_HEAD(&smmu_domain->devices);
 	spin_lock_init(&smmu_domain->devices_lock);
 	INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
 
+	if (dev) {
+		struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+		int ret;
+
+		ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+		if (ret) {
+			kfree(smmu_domain);
+			return ERR_PTR(ret);
+		}
+	}
 	return &smmu_domain->domain;
 }
@@ -3083,6 +3094,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.blocked_domain = &arm_smmu_blocked_domain,
 	.capable = arm_smmu_capable,
 	.domain_alloc = arm_smmu_domain_alloc,
+	.domain_alloc_paging = arm_smmu_domain_alloc_paging,
 	.probe_device = arm_smmu_probe_device,
 	.release_device = arm_smmu_release_device,
 	.device_group = arm_smmu_device_group,
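
For context, here is a minimal, self-contained sketch of the dispatch pattern this conversion targets: when a driver exposes a .domain_alloc_paging op, the core can call it with the device for paging (DMA/unmanaged) domains, while the legacy type-based .domain_alloc is left to handle only the remaining cases (here, SVA). This is not the kernel's actual core allocator; the alloc_paging_domain() helper and the stub types below are hypothetical stand-ins for illustration only.

```c
/*
 * Standalone sketch (not kernel code): models how a core allocator could
 * prefer the newer domain_alloc_paging() op over the legacy domain_alloc()
 * for paging domains. Member names mirror struct iommu_ops, but the
 * dispatch helper and stub types here are hypothetical.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct iommu_domain { const char *name; };

struct iommu_ops {
	/* legacy path: receives the requested domain type */
	struct iommu_domain *(*domain_alloc)(unsigned int type);
	/* newer path: receives the device so the driver can finalise early */
	struct iommu_domain *(*domain_alloc_paging)(void *dev);
};

/* hypothetical core-side dispatch: prefer domain_alloc_paging when present */
static struct iommu_domain *alloc_paging_domain(const struct iommu_ops *ops,
						void *dev, int *err)
{
	if (ops->domain_alloc_paging)
		return ops->domain_alloc_paging(dev);
	if (ops->domain_alloc)
		return ops->domain_alloc(0 /* stand-in for a paging type */);
	*err = -EOPNOTSUPP;
	return NULL;
}

/* stub driver callback standing in for arm_smmu_domain_alloc_paging() */
static struct iommu_domain stub_domain = { .name = "paging" };

static struct iommu_domain *stub_alloc_paging(void *dev)
{
	(void)dev;
	return &stub_domain;
}

int main(void)
{
	struct iommu_ops ops = { .domain_alloc_paging = stub_alloc_paging };
	int err = 0;
	struct iommu_domain *d = alloc_paging_domain(&ops, NULL, &err);

	printf("allocated: %s\n", d ? d->name : "(none)");
	return 0;
}
```

The design point the sketch illustrates is the same one the patch relies on: because the device is available at allocation time, the driver can finalise the domain immediately (as arm_smmu_domain_alloc_paging() does when dev is non-NULL) instead of deferring that work to the first attach.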