@@ -68,9 +68,67 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
kfree(to_dmar_domain(domain));
}
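+/*
+ * Perform a page-selective IOTLB flush for the given range on every
+ * IOMMU that the domain has been attached to.
+ */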
+static void domain_flush_iotlb_psi(struct dmar_domain *domain,
+ u64 addr, unsigned long npages)
+{
+ struct iommu_domain_info *info;
+ unsigned long i;
+
+ xa_for_each(&domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, domain,
+ addr >> VTD_PAGE_SHIFT, npages, 1, 0);
+}
+
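+/*
+ * Process an array of first-stage cache invalidation requests passed in
+ * from user space and flush the affected IOTLB entries.
+ */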
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+ struct iommu_user_data_array *array,
+ u32 *cerror_idx)
+{
+ const size_t min_len =
+ offsetofend(struct iommu_hwpt_vtd_s1_invalidate, __reserved);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct iommu_hwpt_vtd_s1_invalidate inv_info;
+ u32 index;
+ int ret = 0;
+
+ /*
+ * REVISIT: VT-d hardware defines the ITE, ICE and IQE errors for
+ * invalidation failures, but no error code is defined for them in
+ * the uAPI yet, so just report 0 for now.
+ */
+ *cerror_idx = 0;
+
+ if (array->entry_len < min_len)
+ return -EINVAL;
+
+ for (index = 0; index < array->entry_num; index++) {
+ ret = iommu_copy_user_data_from_array(&inv_info, array, index,
+ sizeof(inv_info), min_len);
+ if (ret) {
+ pr_err_ratelimited("Failed to fetch invalidation request\n");
+ break;
+ }
+
+ if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_QI_FLAGS_LEAF) ||
+ !IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
+ ret = -EINVAL;
+ break;
+ }
+
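+ /*
+ * An entry with addr 0 and npages of U64_MAX asks for the whole
+ * address space to be invalidated; otherwise do a page-selective
+ * flush of the requested range.
+ */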
+ if (inv_info.addr == 0 && inv_info.npages == U64_MAX)
+ intel_flush_iotlb_all(domain);
+ else
+ domain_flush_iotlb_psi(dmar_domain,
+ inv_info.addr, inv_info.npages);
+ }
+
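+ /* Tell the caller how many invalidation requests were handled */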
+ array->entry_num = index;
+
+ return ret;
+}
+
static const struct iommu_domain_ops intel_nested_domain_ops = {
.attach_dev = intel_nested_attach_dev,
.free = intel_nested_domain_free,
+ .cache_invalidate_user = intel_nested_cache_invalidate_user,
};
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *s2_domain,