[v2,3/9] KVM: MMU: introduce for_each_slot_rmap_range
diff mbox

Message ID 1431397953-16642-4-git-send-email-guangrong.xiao@linux.intel.com
State New
Headers show

Commit Message

Xiao Guangrong May 12, 2015, 2:32 a.m. UTC
It's used to abstract the code from kvm_handle_hva_range and it will be
used by a later patch

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mmu.c | 97 +++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 75 insertions(+), 22 deletions(-)

Comments

Paolo Bonzini May 12, 2015, 8:22 a.m. UTC | #1
On 12/05/2015 04:32, Xiao Guangrong wrote:
> +#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
> +	   _start_gfn, _end_gfn, _iter_)				\
> +	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
> +				 _end_level_, _start_gfn, _end_gfn);	\
> +	     slot_rmap_walk_okay(_iter_);				\
> +	     slot_rmap_walk_next(_iter_))
> +


> +		for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
> +		   PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1,


What about adding a

#define PT_MAX_HUGEPAGE_LEVEL  (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

?

> +		   gfn_start, gfn_end - 1, &iterator)
> +			ret |= handler(kvm, iterator.rmap, memslot,
> +				       iterator.gfn, iterator.level, data);

I prefer to indent by two tabs:

		for_each_slot_rmap_range(memslot,
				PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
				gfn_start, gfn_end - 1, &iterator)
			ret |= handler(kvm, iterator.rmap, memslot,
				       iterator.gfn, iterator.level, data);


Same in the next patch:

	for_each_slot_rmap_range(memslot,
			start_level, end_level,
			start_gfn, end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);
		...
	}
Thanks,

Paolo
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Xiao Guangrong May 12, 2015, 9:38 a.m. UTC | #2
On 05/12/2015 04:22 PM, Paolo Bonzini wrote:
>
>
> On 12/05/2015 04:32, Xiao Guangrong wrote:
>> +#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
>> +	   _start_gfn, _end_gfn, _iter_)				\
>> +	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
>> +				 _end_level_, _start_gfn, _end_gfn);	\
>> +	     slot_rmap_walk_okay(_iter_);				\
>> +	     slot_rmap_walk_next(_iter_))
>> +
>
>
>> +		for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
>> +		   PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1,
>
>
> What about adding a
>
> #define PT_MAX_HUGEPAGE_LEVEL  (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

Good to me.

>
> ?
>
>> +		   gfn_start, gfn_end - 1, &iterator)
>> +			ret |= handler(kvm, iterator.rmap, memslot,
>> +				       iterator.gfn, iterator.level, data);
>
> I prefer to indent by two tabs:
>
> 		for_each_slot_rmap_range(memslot,
> 				PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
> 				gfn_start, gfn_end - 1, &iterator)
> 			ret |= handler(kvm, iterator.rmap, memslot,
> 				       iterator.gfn, iterator.level, data);
>
>
> Same in the next patch:
>
> 	for_each_slot_rmap_range(memslot,
> 			start_level, end_level,
> 			start_gfn, end_gfn, &iterator) {
> 		if (iterator.rmap)
> 			flush |= fn(kvm, iterator.rmap);
> 		...
> 	}

looks nice, will follow your style. :)

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch
diff mbox

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0da9cf0..98b7a6a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1417,6 +1417,74 @@  restart:
 	return 0;
 }
 
+/*
+ * State for walking the rmap entries of one memslot over a range of
+ * gfns at each page-size level in a range of levels.  Initialize with
+ * slot_rmap_walk_init() and advance with slot_rmap_walk_next(); the
+ * output fields are valid while slot_rmap_walk_okay() returns true.
+ */
+struct slot_rmap_walk_iterator {
+	/* input fields: the memslot and the gfn/level ranges to walk. */
+	struct kvm_memory_slot *slot;
+	gfn_t start_gfn;
+	gfn_t end_gfn;
+	int start_level;
+	int end_level;
+
+	/* output fields: current position of the walk. */
+	gfn_t gfn;
+	unsigned long *rmap;
+	int level;
+
+	/* private field: rmap entry of end_gfn at the current level. */
+	unsigned long *end_rmap;
+};
+
+/*
+ * Position the iterator at the first rmap entry of @level: point
+ * ->rmap at the entry for ->start_gfn and cache the entry for
+ * ->end_gfn in ->end_rmap so slot_rmap_walk_next() can detect when
+ * this level is exhausted.
+ */
+static void
+rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
+{
+	iterator->level = level;
+	iterator->gfn = iterator->start_gfn;
+	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
+	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
+					   iterator->slot);
+}
+
+/*
+ * Begin a walk over @slot's rmaps covering gfns [@start_gfn, @end_gfn]
+ * at each level in [@start_level, @end_level]; both ranges are
+ * inclusive.  The walk starts at @start_gfn on @start_level.
+ */
+static void
+slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
+		    struct kvm_memory_slot *slot, int start_level,
+		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
+{
+	iterator->slot = slot;
+	iterator->start_level = start_level;
+	iterator->end_level = end_level;
+	iterator->start_gfn = start_gfn;
+	iterator->end_gfn = end_gfn;
+
+	rmap_walk_init_level(iterator, iterator->start_level);
+}
+
+/*
+ * True while the iterator points at a valid rmap entry;
+ * slot_rmap_walk_next() clears ->rmap once the walk is finished.
+ */
+static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
+{
+	return !!iterator->rmap;
+}
+
+/*
+ * Advance to the next rmap entry: first step through consecutive
+ * entries of the current level (bumping ->gfn by the level's page
+ * size), then restart at ->start_gfn on the next level.  Once all
+ * levels up to ->end_level are exhausted, ->rmap is set to NULL to
+ * terminate the walk.
+ */
+static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
+{
+	if (++iterator->rmap <= iterator->end_rmap) {
+		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
+		return;
+	}
+
+	if (++iterator->level > iterator->end_level) {
+		iterator->rmap = NULL;
+		return;
+	}
+
+	rmap_walk_init_level(iterator, iterator->level);
+}
+
+/*
+ * Iterate _iter_ over every rmap entry of _slot_ for gfns
+ * [_start_gfn, _end_gfn] at each level in [_start_level_, _end_level_];
+ * all bounds are inclusive.
+ */
+#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
+	   _start_gfn, _end_gfn, _iter_)				\
+	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
+				 _end_level_, _start_gfn, _end_gfn);	\
+	     slot_rmap_walk_okay(_iter_);				\
+	     slot_rmap_walk_next(_iter_))
+
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,
@@ -1428,10 +1496,10 @@  static int kvm_handle_hva_range(struct kvm *kvm,
 					       int level,
 					       unsigned long data))
 {
-	int j;
-	int ret = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	struct slot_rmap_walk_iterator iterator;
+	int ret = 0;
 
 	slots = kvm_memslots(kvm);
 
@@ -1451,26 +1519,11 @@  static int kvm_handle_hva_range(struct kvm *kvm,
 		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
-		for (j = PT_PAGE_TABLE_LEVEL;
-		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
-			unsigned long idx, idx_end;
-			unsigned long *rmapp;
-			gfn_t gfn = gfn_start;
-
-			/*
-			 * {idx(page_j) | page_j intersects with
-			 *  [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
-			 */
-			idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
-			idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);
-
-			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
-
-			for (; idx <= idx_end;
-			       ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
-				ret |= handler(kvm, rmapp++, memslot,
-					       gfn, j, data);
-		}
+		for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
+		   PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1,
+		   gfn_start, gfn_end - 1, &iterator)
+			ret |= handler(kvm, iterator.rmap, memslot,
+				       iterator.gfn, iterator.level, data);
 	}
 
 	return ret;