@@ -35,6 +35,9 @@ static unsigned long io_map_base;
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+/* Flags controlling S2 unmapping */
+#define KVM_UNMAP_ELIDE_CMO (1UL << 0)
+
#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
@@ -253,7 +256,8 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
kvm_tlb_flush_vmid_ipa(kvm, addr);
/* No need to invalidate the cache for device mappings */
- if (!kvm_is_device_pfn(pte_pfn(old_pte)))
+ if (!kvm_is_device_pfn(pte_pfn(old_pte)) &&
+ !(flags & KVM_UNMAP_ELIDE_CMO))
kvm_flush_dcache_pte(old_pte);
put_page(virt_to_page(pte));
@@ -281,7 +285,8 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- kvm_flush_dcache_pmd(old_pmd);
+ if (!(flags & KVM_UNMAP_ELIDE_CMO))
+ kvm_flush_dcache_pmd(old_pmd);
put_page(virt_to_page(pmd));
} else {
@@ -310,7 +315,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- kvm_flush_dcache_pud(old_pud);
+ if (!(flags & KVM_UNMAP_ELIDE_CMO))
+ kvm_flush_dcache_pud(old_pud);
put_page(virt_to_page(pud));
} else {
unmap_stage2_pmds(kvm, pud, addr, next, flags);
In order to allow the elision of cache maintenance operations on
unmap, add a new flag (KVM_UNMAP_ELIDE_CMO) that a caller can use to
indicate that CMOs are not required.

Nobody is passing this flag yet, hence no functional change.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 virt/kvm/arm/mmu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
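
For context, here is a minimal sketch of how a later patch might pass the
new flag. It assumes the range-unmap entry point already takes a flags
argument (added earlier in this series); the __unmap_stage2_range() name
and the example caller are illustrative, not part of this patch:

	/*
	 * Illustrative only: a hypothetical caller tearing down a
	 * Stage-2 range while skipping dcache maintenance, e.g.
	 * because it knows the cache state is handled elsewhere.
	 * The __unmap_stage2_range() signature is assumed from
	 * earlier patches in this series.
	 */
	static void example_unmap_no_cmo(struct kvm *kvm,
					 phys_addr_t start, u64 size)
	{
		__unmap_stage2_range(kvm, start, size, KVM_UNMAP_ELIDE_CMO);
	}

With no flag set (0), the unmap path behaves exactly as before, which is
why this patch on its own is not a functional change.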