@@ -751,6 +751,8 @@ void intel_svm_unbind(struct iommu_sva *handle);
 u32 intel_svm_get_pasid(struct iommu_sva *handle);
 int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
 			    struct iommu_page_response *msg);
+struct iommu_domain *intel_svm_domain_alloc(void);
+void intel_svm_block_dev_pasid(struct device *dev, ioasid_t pasid);
 
 struct intel_svm_dev {
 	struct list_head list;
@@ -776,6 +778,13 @@ struct intel_svm {
 };
 #else
 static inline void intel_svm_check(struct intel_iommu *iommu) {}
+static inline struct iommu_domain *intel_svm_domain_alloc(void)
+{
+	return NULL;
+}
+static inline void intel_svm_block_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+}
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
@@ -4137,9 +4137,29 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain,
 	return 0;
 }
 
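+/*
+ * If an SVA domain is attached to this {device, PASID} pair, unbind it so
+ * that DMA requests tagged with the PASID are blocked.
+ */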
+static int blocking_domain_set_dev_pasid(struct iommu_domain *_domain,
+					 struct device *dev, ioasid_t pasid)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
+	if (IS_ERR(domain))
+		return PTR_ERR(domain);
+
+	if (domain)
+		intel_svm_block_dev_pasid(dev, pasid);
+
+	return 0;
+}
+
 static struct iommu_domain blocking_domain = {
 	.ops = &(const struct iommu_domain_ops) {
-		.attach_dev = blocking_domain_attach_dev
+		.attach_dev = blocking_domain_attach_dev,
+		.set_dev_pasid = blocking_domain_set_dev_pasid,
 	}
 };
 
@@ -4174,6 +4194,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 		return domain;
 	case IOMMU_DOMAIN_IDENTITY:
 		return &si_domain->domain;
+	case IOMMU_DOMAIN_SVA:
+		return intel_svm_domain_alloc();
 	default:
 		return NULL;
 	}
@@ -928,3 +928,57 @@ int intel_svm_page_response(struct device *dev,
 	mutex_unlock(&pasid_mutex);
 	return ret;
 }
+
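+/* Tear down the SVA bond for this device and PASID. */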
+void intel_svm_block_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+	mutex_lock(&pasid_mutex);
+	intel_svm_unbind_mm(dev, pasid);
+	mutex_unlock(&pasid_mutex);
+}
+
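+/* set_dev_pasid domain op: bind the SVA domain's mm to the device. */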
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+				    struct device *dev, ioasid_t pasid)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct mm_struct *mm = domain->mm;
+	struct iommu_sva *sva;
+	int ret = 0;
+
+	mutex_lock(&pasid_mutex);
+	sva = intel_svm_bind_mm(iommu, dev, mm);
+	if (IS_ERR(sva))
+		ret = PTR_ERR(sva);
+	mutex_unlock(&pasid_mutex);
+
+	return ret;
+}
+
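+/* Free an SVA domain allocated by intel_svm_domain_alloc(). */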
+static void intel_svm_domain_free(struct iommu_domain *domain)
+{
+	kfree(to_dmar_domain(domain));
+}
+
+static const struct iommu_domain_ops intel_svm_domain_ops = {
+	.set_dev_pasid = intel_svm_set_dev_pasid,
+	.free = intel_svm_domain_free
+};
+
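+/*
+ * Allocate a domain for IOMMU_DOMAIN_SVA requests. Only the domain ops
+ * are initialized here; the caller fills in the domain type and mm.
+ */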
+struct iommu_domain *intel_svm_domain_alloc(void)
+{
+	struct dmar_domain *domain;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return NULL;
+	domain->domain.ops = &intel_svm_domain_ops;
+
+	return &domain->domain;
+}