@@ -3189,8 +3189,6 @@ static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
.master = master,
.old_domain = iommu_get_domain_for_dev(dev),
.ssid = IOMMU_NO_PASID,
- /* Currently invalidation of ATC is not supported */
- .disable_ats = true,
};
struct arm_smmu_ste ste;
int ret;
@@ -3200,6 +3198,15 @@ static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
return -EINVAL;
mutex_lock(&arm_smmu_asid_lock);
+ /*
+ * The VM has to control the actual ATS state at the PCI device because
+ * we forward the invalidations directly from the VM. If the VM doesn't
+ * think ATS is on, it will not generate ATC flushes and the ATC will
+ * become incoherent. Since we can't access the actual virtual PCI ATS
+ * config bit here, base this on the EATS value in the STE: if EATS is
+ * set to translated, the VM must generate ATC flushes.
+ */
+ state.disable_ats = !nested_domain->enable_ats;
ret = arm_smmu_attach_prepare(&state, domain);
if (ret) {
mutex_unlock(&arm_smmu_asid_lock);
@@ -3420,8 +3427,9 @@ arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
cfg != STRTAB_STE_0_CFG_S1_TRANS)
return ERR_PTR(-EIO);
+ /* Only Full ATS or ATS UR is supported */
eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg.ste[1]));
- if (eats != STRTAB_STE_1_EATS_ABT)
+ if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS)
return ERR_PTR(-EIO);
if (cfg != STRTAB_STE_0_CFG_S1_TRANS)
@@ -3434,6 +3442,7 @@ arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
nested_domain->domain.ops = &arm_smmu_nested_ops;
nested_domain->s2_parent = smmu_parent;
+ nested_domain->enable_ats = eats == STRTAB_STE_1_EATS_TRANS;
nested_domain->ste[0] = arg.ste[0];
nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);
@@ -811,6 +811,7 @@ struct arm_smmu_domain {
struct arm_smmu_nested_domain {
struct iommu_domain domain;
struct arm_smmu_domain *s2_parent;
+ u8 enable_ats : 1;
__le64 ste[2];
};
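
For reference, a minimal user-space sketch (illustrative only, not part of this patch; the helper name and the VSTE_* macros are made up) of how a VMM could request full ATS in the vSTE it passes through struct iommu_hwpt_arm_smmuv3 at IOMMU_HWPT_ALLOC time, so that the EATS check above records enable_ats. The field placement mirrors the driver's private STRTAB_STE_1_EATS definitions (bits 29:28 of STE word 1, encoding 1 for full ATS), which are not exported through uapi, so userspace has to carry its own copies:

#include <endian.h>
#include <stdint.h>
#include <linux/iommufd.h>	/* struct iommu_hwpt_arm_smmuv3, needs recent uapi headers */

/* Assumed copies of the driver's STRTAB_STE_1_EATS / STRTAB_STE_1_EATS_TRANS encoding */
#define VSTE_1_EATS_MASK	(3ULL << 28)
#define VSTE_1_EATS_TRANS	(1ULL << 28)

/* Mark the guest STE as using full ATS before handing it to IOMMU_HWPT_ALLOC */
static void vste_request_full_ats(struct iommu_hwpt_arm_smmuv3 *arg)
{
	uint64_t ste1 = le64toh(arg->ste[1]);

	ste1 &= ~VSTE_1_EATS_MASK;	/* clear any previous EATS encoding */
	ste1 |= VSTE_1_EATS_TRANS;	/* EATS = translated: the VM owns ATC invalidation */
	arg->ste[1] = htole64(ste1);
}

Note that the driver strips EATS from the copy it keeps (the & ~cpu_to_le64(STRTAB_STE_1_EATS) above) and tracks the request in enable_ats instead, so only the abort and translated encodings survive validation.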