diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -7427,13 +7427,32 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
 	kvm_mmu_uninit_direct_mmu(kvm);
 }
 
+void kvm_zap_slot_gfn_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			    gfn_t start, gfn_t end)
+{
+	write_lock(&kvm->mmu_lock);
+	if (kvm->arch.direct_mmu_enabled) {
+		zap_direct_gfn_range(kvm, memslot->as_id, start, end,
+				     MMU_WRITE_LOCK);
+	}
+
+	if (kvm->arch.pure_direct_mmu) {
+		write_unlock(&kvm->mmu_lock);
+		return;
+	}
+
+	slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+				PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
+				start, end - 1, true);
+	write_unlock(&kvm->mmu_lock);
+}
+
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i;
 
-	write_lock(&kvm->mmu_lock);
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(memslot, slots) {
@@ -7444,13 +7463,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 			if (start >= end)
 				continue;
 
-			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-					PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-					start, end - 1, true);
+			kvm_zap_slot_gfn_range(kvm, memslot, start, end);
 		}
 	}
-
-	write_unlock(&kvm->mmu_lock);
 }
 
 static bool slot_rmap_write_protect(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -204,6 +204,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 }
 
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
+void kvm_zap_slot_gfn_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			    gfn_t start, gfn_t end);
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 
Add a function for zapping ranges of GFNs in a memslot to support
kvm_zap_gfn_range for the direct MMU.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu.c | 27 +++++++++++++++++++++------
 arch/x86/kvm/mmu.h |  2 ++
 2 files changed, 23 insertions(+), 6 deletions(-)
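For readers less familiar with the split between the direct MMU and the
rmap-based shadow paging paths, the user-space sketch below models the
control flow the new helper implements: take mmu_lock exclusively once,
zap through the direct MMU when it is enabled, and skip the rmap walk
entirely on a purely direct VM. It is illustrative only, not kernel
code: the two flags mirror the fields the patch tests, pthread_rwlock_t
stands in for kvm->mmu_lock, and the two zap stubs are hypothetical
stand-ins for zap_direct_gfn_range() and the
slot_handle_level_range()-based rmap walk.

/*
 * Illustrative model of kvm_zap_slot_gfn_range()'s control flow.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct kvm {
	pthread_rwlock_t mmu_lock;	/* models kvm->mmu_lock */
	bool direct_mmu_enabled;	/* direct (TDP) paging in use */
	bool pure_direct_mmu;		/* no rmap-based shadow pages */
};

struct kvm_memory_slot {
	int as_id;			/* address space ID of the slot */
};

/* Hypothetical stand-in for zap_direct_gfn_range(). */
static void zap_direct_stub(struct kvm *kvm, int as_id, gfn_t start, gfn_t end)
{
	(void)kvm;
	printf("direct zap: as_id=%d, GFNs [%#llx, %#llx)\n", as_id,
	       (unsigned long long)start, (unsigned long long)end);
}

/* Hypothetical stand-in for the rmap walk via slot_handle_level_range(). */
static void zap_rmap_stub(struct kvm *kvm, gfn_t start, gfn_t end)
{
	(void)kvm;
	printf("rmap zap: GFNs [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void zap_slot_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
			       gfn_t start, gfn_t end)
{
	/* The whole zap runs under the write (exclusive) side of the lock. */
	pthread_rwlock_wrlock(&kvm->mmu_lock);

	if (kvm->direct_mmu_enabled)
		zap_direct_stub(kvm, slot->as_id, start, end);

	/* A purely direct VM has no rmaps, so the fallback is skipped. */
	if (!kvm->pure_direct_mmu)
		zap_rmap_stub(kvm, start, end);

	pthread_rwlock_unlock(&kvm->mmu_lock);
}

int main(void)
{
	struct kvm kvm = { .direct_mmu_enabled = true,
			   .pure_direct_mmu = false };
	struct kvm_memory_slot slot = { .as_id = 0 };

	pthread_rwlock_init(&kvm.mmu_lock, NULL);
	zap_slot_gfn_range(&kvm, &slot, 0x1000, 0x2000);  /* both paths run */
	pthread_rwlock_destroy(&kvm.mmu_lock);
	return 0;
}

One structural consequence of the refactor, visible in both the patch
and the sketch: mmu_lock is now acquired and released per memslot range
inside the helper rather than once around the whole loop in
kvm_zap_gfn_range(), so the zap is no longer atomic across memslots;
each slot's range is zapped in its own critical section.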