@@ -2140,6 +2140,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
+ case IOMMU_CAP_VIOMMU_HINT:
+ return amd_iommu_np_cache;
default:
break;
}
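On AMD, amd_iommu_np_cache is latched at probe time when the IOMMU's capability header advertises that it caches not-present translations, which forces the guest to flush on map as well as on unmap. Bare-metal IOMMUs have little reason to behave this way, while emulated ones advertise it so the hypervisor can intercept new mappings, which is why it doubles as the virtualization hint here. A minimal sketch of that reasoning (the helper name is hypothetical, not part of the patch):

static bool amd_iommu_probably_viommu(void)
{
    /*
     * An NP-caching IOMMU requires invalidations even when a
     * not-present entry becomes present -- behaviour typically
     * advertised only by emulated AMD IOMMUs.
     */
    return amd_iommu_np_cache;
}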
@@ -294,6 +294,7 @@ static inline void context_clear_entry(struct context_entry *context)
*/
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
+static int intel_caching_mode;
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
@@ -3253,6 +3254,8 @@ static int __init init_dmars(void)
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
+ if (cap_caching_mode(iommu->cap))
+ intel_caching_mode = 1;
intel_svm_check(iommu);
}
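On Intel, the equivalent signal is the Caching Mode (CM) bit of the capability register, extracted by cap_caching_mode(); the VT-d spec expects CM=1 (invalidation required even on not-present -> present transitions) only from software implementations, so a set bit effectively means an emulated VT-d. The result is latched into intel_caching_mode for the capability callback below. For reference, the existing macro (as defined in include/linux/intel-iommu.h in kernels of this era; quoted for context, not part of the patch):

/* Caching Mode: bit 7 of the VT-d capability register. */
#define cap_caching_mode(c) (((c) >> 7) & 1)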
@@ -5113,6 +5116,8 @@ static bool intel_iommu_capable(enum iommu_cap cap)
return domain_update_iommu_snooping(NULL) == 1;
if (cap == IOMMU_CAP_INTR_REMAP)
return irq_remapping_enabled == 1;
+ if (cap == IOMMU_CAP_VIOMMU_HINT)
+ return intel_caching_mode;
return false;
}
@@ -931,7 +931,16 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static bool viommu_capable(enum iommu_cap cap)
+{
+ if (cap == IOMMU_CAP_VIOMMU_HINT)
+ return true;
+
+ return false;
+}
+
static struct iommu_ops viommu_ops = {
+ .capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.domain_free = viommu_domain_free,
.attach_dev = viommu_attach_dev,
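virtio-iommu can answer unconditionally: it is a paravirtual device, so its mere presence implies running in a VM. All three .capable implementations are reached through the generic capability query, whose shape in kernels of this era is roughly the following (paraphrased from drivers/iommu/iommu.c, not part of this patch):

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
    if (!bus->iommu_ops || !bus->iommu_ops->capable)
        return false;

    return bus->iommu_ops->capable(cap);
}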
@@ -94,6 +94,8 @@ enum iommu_cap {
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
+ IOMMU_CAP_VIOMMU_HINT, /* IOMMU can provide a hint that it is
+ running in a VM */
};
/*
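A caller could use the new capability to tune behaviour for the virtual case, for example preferring batched invalidation when every IOTLB flush traps to the host. A minimal sketch under that assumption (the helper and the policy are hypothetical, not taken from this patch):

#include <linux/iommu.h>

/*
 * Hypothetical policy helper: defer and batch unmap invalidations
 * behind a flush queue when a vIOMMU is hinted, amortizing the cost
 * of trapped flushes instead of paying it on every unmap.
 */
static bool want_deferred_flush(struct bus_type *bus)
{
    return iommu_capable(bus, IOMMU_CAP_VIOMMU_HINT);
}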