@@ -1944,7 +1944,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags);
}
-static bool dev_is_real_dma_subdevice(struct device *dev)
+bool dev_is_real_dma_subdevice(struct device *dev)
{
return dev && dev_is_pci(dev) &&
pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
@@ -818,6 +818,11 @@ domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
return info->did;
}
+static inline bool domain_type_is_nested(struct dmar_domain *domain)
+{
+ return domain->domain.type == IOMMU_DOMAIN_NESTED;
+}
+
/*
* 0: readable
* 1: writable
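The new domain_type_is_nested() helper follows the same pattern as the existing domain_type_is_*() predicates in this header: struct dmar_domain embeds the generic struct iommu_domain, and the helper simply inspects the embedded type field. A stripped-down, self-contained illustration of that embedded-struct pattern (the type definitions below are simplified stand-ins, not the real kernel layouts):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; not the real definitions. */
enum iommu_domain_type { IOMMU_DOMAIN_UNMANAGED, IOMMU_DOMAIN_NESTED };

struct iommu_domain {
	enum iommu_domain_type type;
};

struct dmar_domain {
	struct iommu_domain domain;	/* generic domain embedded in the driver one */
	/* ... driver-private state ... */
};

/* Same shape as the helper added above: look at the embedded type field. */
static inline bool domain_type_is_nested(struct dmar_domain *domain)
{
	return domain->domain.type == IOMMU_DOMAIN_NESTED;
}

int main(void)
{
	struct dmar_domain d = { .domain = { .type = IOMMU_DOMAIN_NESTED } };

	printf("nested: %d\n", domain_type_is_nested(&d));
	return 0;
}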
@@ -1225,6 +1230,8 @@ void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
+bool dev_is_real_dma_subdevice(struct device *dev);
+
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
@@ -130,8 +130,51 @@ static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
return ret;
}
+static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu = info->iommu;
+ struct dev_pasid_info *dev_pasid;
+ int ret;
+
+ /* Replacing an SVA domain is not supported so far */
+ if (old && old->type == IOMMU_DOMAIN_SVA)
+ return -EOPNOTSUPP;
+
+ if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+ return -EOPNOTSUPP;
+
+ if (context_copied(iommu, info->bus, info->devfn))
+ return -EBUSY;
+
+ ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain,
+ dev);
+ if (ret)
+ return ret;
+
+ dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
+ if (IS_ERR(dev_pasid))
+ return PTR_ERR(dev_pasid);
+
+ ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ if (ret)
+ goto out_remove_dev_pasid;
+
+ domain_remove_dev_pasid(old, dev, pasid);
+
+ return 0;
+
+out_remove_dev_pasid:
+ domain_remove_dev_pasid(domain, dev, pasid);
+ return ret;
+}
+
static const struct iommu_domain_ops intel_nested_domain_ops = {
.attach_dev = intel_nested_attach_dev,
+ .set_dev_pasid = intel_nested_set_dev_pasid,
.free = intel_nested_domain_free,
.cache_invalidate_user = intel_nested_cache_invalidate_user,
};
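intel_nested_set_dev_pasid() uses a make-before-break order for PASID replacement: the new dev_pasid tracking entry is added and the hardware PASID entry is programmed for the new domain first, and only then is the old domain's binding removed; if programming fails, the new tracking entry is unwound and the old binding stays live. Below is a minimal user-space model of that ordering. The types and helpers are invented stand-ins named after the kernel functions they mirror (domain_add_dev_pasid, domain_setup_nested, domain_remove_dev_pasid), not kernel API, and the hardware-programming step is reduced to a printf that always succeeds:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative per-device tracking of which domain is bound at which
 * PASID, kept as a small array so an old and a new binding for the same
 * PASID can coexist briefly during replacement.
 */
struct dev_pasid_info {
	int pasid;
	const char *domain;
};

#define MAX_BINDINGS 8
static struct dev_pasid_info tracked[MAX_BINDINGS];
static int ntracked;

static struct dev_pasid_info *domain_add_dev_pasid(const char *domain, int pasid)
{
	if (ntracked == MAX_BINDINGS)
		return NULL;
	tracked[ntracked] = (struct dev_pasid_info){ .pasid = pasid, .domain = domain };
	return &tracked[ntracked++];
}

static void domain_remove_dev_pasid(const char *domain, int pasid)
{
	for (int i = 0; i < ntracked; i++) {
		if (tracked[i].pasid == pasid && !strcmp(tracked[i].domain, domain)) {
			tracked[i] = tracked[--ntracked];	/* swap-remove */
			return;
		}
	}
}

/* Stand-in for programming the hardware PASID entry (always succeeds here). */
static int domain_setup_nested(const char *domain, int pasid)
{
	printf("pasid %d now translates through %s\n", pasid, domain);
	return 0;
}

/*
 * Same ordering as intel_nested_set_dev_pasid(): track the new binding,
 * program the hardware, and only then drop the old binding.  On failure,
 * unwind the new tracking entry and leave the old binding untouched.
 */
static int set_dev_pasid(const char *new_domain, int pasid, const char *old_domain)
{
	struct dev_pasid_info *dev_pasid;
	int ret;

	dev_pasid = domain_add_dev_pasid(new_domain, pasid);
	if (!dev_pasid)
		return -ENOMEM;

	ret = domain_setup_nested(new_domain, pasid);
	if (ret) {
		domain_remove_dev_pasid(new_domain, pasid);
		return ret;
	}

	if (old_domain)
		domain_remove_dev_pasid(old_domain, pasid);
	return 0;
}

int main(void)
{
	set_dev_pasid("nested-A", 5, NULL);		/* fresh attach */
	set_dev_pasid("nested-B", 5, "nested-A");	/* replacement */
	return 0;
}

In the real callback the setup-vs-replace distinction is handled inside domain_setup_nested() by passing the old domain through; the model above folds that detail away and only keeps the ordering.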
@@ -335,6 +335,17 @@ static inline int domain_setup_passthrough(struct intel_iommu *iommu,
return intel_pasid_setup_pass_through(iommu, dev, pasid);
}
+static inline int domain_setup_nested(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
+{
+ if (old)
+ return intel_pasid_replace_nested(iommu, dev,
+ pasid, domain);
+ return intel_pasid_setup_nested(iommu, dev, pasid, domain);
+}
+
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
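domain_setup_nested() mirrors the other domain_setup_*() helpers in this header: with no previous domain it installs the PASID entry from scratch via intel_pasid_setup_nested(), and when a live binding is being replaced it goes through intel_pasid_replace_nested() so the entry can be switched rather than torn down and rebuilt. The nested domain itself supplies the first-stage (guest-controlled) page table, which is walked on top of the parent s2_domain's second-stage table. As a toy, self-contained sketch of that two-stage walk (struct stage, the single-range "tables", and the addresses are all invented for illustration):

#include <stdio.h>

/*
 * Toy model of two-stage ("nested") translation: a first stage maps IOVA
 * to guest-physical addresses, and a second stage (the s2_domain) maps
 * guest-physical to host-physical.  Single-range "tables" here are
 * obviously not real page tables.
 */
struct stage {
	unsigned long in_base, out_base, size;
};

static int translate(const struct stage *s, unsigned long addr, unsigned long *out)
{
	if (addr < s->in_base || addr >= s->in_base + s->size)
		return -1;	/* no mapping: would fault in hardware */
	*out = s->out_base + (addr - s->in_base);
	return 0;
}

int main(void)
{
	struct stage first  = { .in_base = 0x1000,  .out_base = 0x40000, .size = 0x1000 };
	struct stage second = { .in_base = 0x40000, .out_base = 0x9a000, .size = 0x1000 };
	unsigned long gpa, hpa;

	/* A DMA to IOVA 0x1234 walks the first stage, then the second. */
	if (!translate(&first, 0x1234, &gpa) && !translate(&second, gpa, &hpa))
		printf("iova 0x1234 -> gpa 0x%lx -> hpa 0x%lx\n", gpa, hpa);
	return 0;
}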