@@ -5841,11 +5841,44 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}
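+/*
+ * Zap only the leaf SPTEs that map @slot: walk the TDP MMU when it is
+ * enabled, otherwise walk the slot's rmaps in the shadow MMU, and flush
+ * remote TLBs if anything was zapped.
+ */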
+static void kvm_mmu_zap_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ bool flush = false;
+
+ write_lock(&kvm->mmu_lock);
+
+ /*
+ * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required; in the
+ * worst case we'll have unused shadow pages lying around until they are
+ * recycled due to age or when the VM is destroyed.
+ */
+ if (is_tdp_mmu_enabled(kvm)) {
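+ /*
+ * TDP MMU: unmap the slot's entire GFN range.  The helper returns
+ * true if any SPTE was zapped, i.e. if a remote TLB flush is needed.
+ */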
+ struct kvm_gfn_range range = {
+ .slot = slot,
+ .start = slot->base_gfn,
+ .end = slot->base_gfn + slot->npages,
+ .may_block = false,
+ };
+
+ flush = kvm_tdp_mmu_unmap_gfn_range(kvm, &range, flush);
+ } else {
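+ /*
+ * Shadow MMU: zap the rmapped leaf SPTEs at every page size level in
+ * the slot; the final "true" requests a TLB flush before the walk
+ * yields if anything has been zapped so far.
+ */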
+ flush = slot_handle_level(kvm, slot, kvm_zap_rmapp, PG_LEVEL_4K,
+ KVM_MAX_HUGEPAGE_LEVEL, true);
+ }
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+ write_unlock(&kvm->mmu_lock);
+}
+
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
- kvm_mmu_zap_all_fast(kvm);
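+ /*
+ * VMs with a shared GFN mask (i.e. TDX) zap only the SPTEs for the
+ * affected memslot; zapping everything would also drop private
+ * mappings, which can't simply be zapped and re-faulted.
+ */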
+ if (kvm_gfn_shared_mask(kvm))
+ kvm_mmu_zap_memslot(kvm, slot);
+ else
+ kvm_mmu_zap_all_fast(kvm);
}
int kvm_mmu_init_vm(struct kvm *kvm)