[v2,7/9] KVM: MMU: introduce kvm_zap_gfn_range

Message ID 1431397953-16642-8-git-send-email-guangrong.xiao@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Xiao Guangrong May 12, 2015, 2:32 a.m. UTC
It is used to zap all the rmaps of the specified gfn range and will be
used by a later patch in this series; an illustrative caller sketch
follows the diffstat below.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mmu.c | 25 +++++++++++++++++++++++++
 arch/x86/kvm/mmu.h |  1 +
 2 files changed, 26 insertions(+)
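
For context, here is a minimal sketch of how such a caller might look; the
function example_invalidate_range is hypothetical and the real call site is
only introduced later in the series. gpa_to_gfn() is the existing KVM helper
for converting a guest physical address to a gfn.

	/*
	 * Hypothetical call site (not part of this patch): when the
	 * attributes of a guest-physical range change, drop its rmaps so
	 * the range is refaulted.  kvm_zap_gfn_range() takes
	 * kvm->mmu_lock itself, so it must be called without the lock
	 * held; the range is treated as [gfn_start, gfn_end).
	 */
	static void example_invalidate_range(struct kvm *kvm, gpa_t start,
					     gpa_t end)
	{
		kvm_zap_gfn_range(kvm, gpa_to_gfn(start), gpa_to_gfn(end));
	}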

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10d5e03..8c400dc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4526,6 +4526,31 @@  slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
 				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
 }
 
+void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+
+	slots = kvm_memslots(kvm);
+
+	spin_lock(&kvm->mmu_lock);
+	kvm_for_each_memslot(memslot, slots) {
+		gfn_t start, end;
+
+		start = max(gfn_start, memslot->base_gfn);
+		end = min(gfn_end, memslot->base_gfn + memslot->npages);
+		if (start >= end)
+			continue;
+
+		slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+				PT_PAGE_TABLE_LEVEL,
+				PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1,
+				start, end - 1, true);
+	}
+
+	spin_unlock(&kvm->mmu_lock);
+}
+
 static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp)
 {
 	return __rmap_write_protect(kvm, rmapp, false);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 06eb2fc..deec5a8 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -172,4 +172,5 @@  static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
+void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 #endif
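
The loop in kvm_zap_gfn_range() clamps the requested range against each
memslot: start = max(gfn_start, base_gfn), end = min(gfn_end, base_gfn +
npages), skipping slots with no overlap and passing end - 1 as the inclusive
last gfn. The standalone sketch below (plain userspace C with made-up slot
boundaries, not KVM code) only illustrates that arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t gfn_t;

	struct slot { gfn_t base_gfn; gfn_t npages; };

	int main(void)
	{
		/* Made-up memslots: [0x000, 0x100) and [0x200, 0x280). */
		struct slot slots[] = { { 0x000, 0x100 }, { 0x200, 0x080 } };
		gfn_t gfn_start = 0x0c0, gfn_end = 0x240; /* requested range */
		unsigned int i;

		for (i = 0; i < 2; i++) {
			gfn_t slot_end = slots[i].base_gfn + slots[i].npages;
			gfn_t start = gfn_start > slots[i].base_gfn ?
				      gfn_start : slots[i].base_gfn; /* max() */
			gfn_t end = gfn_end < slot_end ?
				    gfn_end : slot_end;              /* min() */

			if (start >= end) {
				/* No overlap with this slot: skip it. */
				printf("slot %u: skipped\n", i);
				continue;
			}
			/* kvm_zap_gfn_range() would zap [start, end) here. */
			printf("slot %u: zap [0x%llx, 0x%llx)\n", i,
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		return 0;
	}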