[5/9] KVM: MMU: introduce for_each_slot_rmap

Message ID 1430389490-24602-6-git-send-email-guangrong.xiao@linux.intel.com (mailing list archive)
State: New, archived

Commit Message

Xiao Guangrong April 30, 2015, 10:24 a.m. UTC
From: Xiao Guangrong <guangrong.xiao@linux.intel.com>

Introduce for_each_slot_rmap to clean up the duplicated rmap-walking
code shared by kvm_handle_hva_range and slot_handle_level; it will
also be used by a later patch.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mmu.c | 144 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 99 insertions(+), 45 deletions(-)
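For reference, the visit order the new iterator produces can be modeled
in a few lines of userspace C: every rmap entry of one level is visited
before the walk moves on to the next (larger) page size. The constants
below are simplified stand-ins for the kernel's, and the program is only
an illustration of the walk, not kernel code:

	#include <stdio.h>

	#define PT_PAGE_TABLE_LEVEL	1
	#define KVM_NR_PAGE_SIZES	3	/* 4K, 2M, 1G on x86 */
	/* each level spans 512x (9 bits) more gfns than the previous one */
	#define KVM_HPAGE_GFN_SHIFT(l)	(((l) - PT_PAGE_TABLE_LEVEL) * 9)

	int main(void)
	{
		unsigned long start_gfn = 0, end_gfn = 0x5ff; /* 1536 4K pages */
		int level;

		for (level = PT_PAGE_TABLE_LEVEL;
		     level <= PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1;
		     level++) {
			unsigned long span = 1UL << KVM_HPAGE_GFN_SHIFT(level);
			unsigned long gfn, entries = 0;

			/* same stepping as slot_rmap_walk_next() within a level */
			for (gfn = start_gfn; gfn <= end_gfn; gfn += span)
				entries++;

			printf("level %d: %lu rmap entries, gfn step %lu\n",
			       level, entries, span);
		}
		return 0;
	}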

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4d98c6c..fea1e83 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1417,6 +1417,63 @@  restart:
 	return 0;
 }
 
+struct slot_rmap_walker {
+	/* input fields. */
+	struct kvm_memory_slot *slot;
+	gfn_t start_gfn;
+	gfn_t end_gfn;
+	int start_level;
+	int end_level;
+
+	/* output fields. */
+	gfn_t gfn;
+	unsigned long *rmap;
+	int level;
+
+	/* private field. */
+	unsigned long *end_rmap;
+};
+
+static void rmap_walk_init_level(struct slot_rmap_walker *walker, int level)
+{
+	struct kvm_memory_slot *slot = walker->slot;
+
+	walker->level = level;
+	walker->gfn = walker->start_gfn;
+	walker->rmap = __gfn_to_rmap(walker->gfn, walker->level, slot);
+	walker->end_rmap = __gfn_to_rmap(walker->end_gfn, walker->level, slot);
+}
+
+static void slot_rmap_walk_init(struct slot_rmap_walker *walker)
+{
+	rmap_walk_init_level(walker, walker->start_level);
+}
+
+static bool slot_rmap_walk_okay(struct slot_rmap_walker *walker)
+{
+	return !!walker->rmap;
+}
+
+static void slot_rmap_walk_next(struct slot_rmap_walker *walker)
+{
+	if (++walker->rmap <= walker->end_rmap) {
+		walker->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(walker->level));
+		return;
+	}
+
+	if (++walker->level > walker->end_level) {
+		walker->rmap = NULL;
+		return;
+	}
+
+	rmap_walk_init_level(walker, walker->level);
+}
+
+#define for_each_slot_rmap(_walker_)				\
+		for (slot_rmap_walk_init(_walker_);		\
+			slot_rmap_walk_okay(_walker_);		\
+				slot_rmap_walk_next(_walker_))
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,
@@ -1428,10 +1485,10 @@  static int kvm_handle_hva_range(struct kvm *kvm,
 					       int level,
 					       unsigned long data))
 {
-	int j;
-	int ret = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	struct slot_rmap_walker walker;
+	int ret = 0;
 
 	slots = kvm_memslots(kvm);
 
@@ -1451,26 +1508,18 @@  static int kvm_handle_hva_range(struct kvm *kvm,
 		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
-		for (j = PT_PAGE_TABLE_LEVEL;
-		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
-			unsigned long idx, idx_end;
-			unsigned long *rmapp;
-			gfn_t gfn = gfn_start;
-
-			/*
-			 * {idx(page_j) | page_j intersects with
-			 *  [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
-			 */
-			idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
-			idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);
-
-			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
-
-			for (; idx <= idx_end;
-			       ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
-				ret |= handler(kvm, rmapp++, memslot,
-					       gfn, j, data);
-		}
+		walker = (struct slot_rmap_walker) {
+			.slot = memslot,
+			.start_gfn = gfn_start,
+			.end_gfn = gfn_end - 1,
+			.start_level = PT_PAGE_TABLE_LEVEL,
+			.end_level = PT_PAGE_TABLE_LEVEL +
+					KVM_NR_PAGE_SIZES - 1,
+		};
+
+		for_each_slot_rmap(&walker)
+			ret |= handler(kvm, walker.rmap, memslot, walker.gfn,
+				       walker.level, data);
 	}
 
 	return ret;
@@ -4404,34 +4453,29 @@  typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
 
 /* The caller should hold mmu-lock before calling this function. */
 static bool
-slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		  slot_level_handler fn, int min_level, int max_level,
-		  bool lock_flush_tlb)
+slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			 slot_level_handler fn, int min_level, int max_level,
+			 gfn_t start_gfn, gfn_t last_gfn, bool lock_flush_tlb)
 {
-	unsigned long last_gfn;
 	bool flush = false;
-	int level;
-
-	last_gfn = memslot->base_gfn + memslot->npages - 1;
-
-	for (level = min_level; level <= max_level; ++level) {
-		unsigned long *rmapp;
-		unsigned long last_index, index;
-
-		rmapp = memslot->arch.rmap[level - PT_PAGE_TABLE_LEVEL];
-		last_index = gfn_to_index(last_gfn, memslot->base_gfn, level);
+	struct slot_rmap_walker walker = {
+		.slot = memslot,
+		.start_gfn = start_gfn,
+		.end_gfn = last_gfn,
+		.start_level = min_level,
+		.end_level = max_level,
+	};
 
-		for (index = 0; index <= last_index; ++index, ++rmapp) {
-			if (*rmapp)
-				flush |= fn(kvm, rmapp);
+	for_each_slot_rmap(&walker) {
+		if (*walker.rmap)
+			flush |= fn(kvm, walker.rmap);
 
-			if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-				if (flush && lock_flush_tlb) {
-					kvm_flush_remote_tlbs(kvm);
-					flush = false;
-				}
-				cond_resched_lock(&kvm->mmu_lock);
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			if (flush && lock_flush_tlb) {
+				kvm_flush_remote_tlbs(kvm);
+				flush = false;
 			}
+			cond_resched_lock(&kvm->mmu_lock);
 		}
 	}
 
@@ -4439,6 +4483,16 @@  slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 }
 
 static bool
+slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		  slot_level_handler fn, int min_level, int max_level,
+		  bool lock_flush_tlb)
+{
+	return slot_handle_level_range(kvm, memslot, fn, min_level,
+		max_level, memslot->base_gfn,
+		memslot->base_gfn + memslot->npages - 1, lock_flush_tlb);
+}
+
+static bool
 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		      slot_level_handler fn, bool lock_flush_tlb)
 {