[6/7] KVM: x86/MMU: Move rmap zap operations to rmap.c

Message ID 20221206173601.549281-7-bgardon@google.com (mailing list archive)
State New, archived
Series KVM: x86/MMU: Factor rmap operations out of mmu.c

Commit Message

Ben Gardon Dec. 6, 2022, 5:36 p.m. UTC
Move the various rmap zap functions to rmap.c. These functions are less
"pure" rmap operations in that they also contain some SPTE manipulation;
however, they are still mostly about rmap / pte list manipulation.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 51 +--------------------------------
 arch/x86/kvm/mmu/mmu_internal.h |  1 +
 arch/x86/kvm/mmu/rmap.c         | 50 +++++++++++++++++++++++++++++++-
 arch/x86/kvm/mmu/rmap.h         |  9 +++++-
 4 files changed, 59 insertions(+), 52 deletions(-)

Comments

David Matlack Dec. 9, 2022, 10:44 p.m. UTC | #1
On Tue, Dec 06, 2022 at 05:36:00PM +0000, Ben Gardon wrote:
> Move the various rmap zap functions to rmap.c. These functions are less
> "pure" rmap operations in that they also contain some SPTE manipulation;
> however, they are still mostly about rmap / pte list manipulation.
> 
> No functional change intended.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
[...]
> -static void kvm_zap_one_rmap_spte(struct kvm *kvm,
> -				  struct kvm_rmap_head *rmap_head, u64 *sptep)
> -{
> -	mmu_spte_clear_track_bits(kvm, sptep);
> -	pte_list_remove(sptep, rmap_head);
> -}
> -
> -/* Return true if at least one SPTE was zapped, false otherwise */
> -static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
> -				   struct kvm_rmap_head *rmap_head)
> -{
> -	struct pte_list_desc *desc, *next;
> -	int i;
> -
> -	if (!rmap_head->val)
> -		return false;
> -
> -	if (!(rmap_head->val & 1)) {
> -		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
> -		goto out;
> -	}
> -
> -	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
> -
> -	for (; desc; desc = next) {
> -		for (i = 0; i < desc->spte_count; i++)
> -			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
> -		next = desc->more;
> -		free_pte_list_desc(desc);
> -	}
> -out:
> -	/* rmap_head is meaningless now, remember to reset it */
> -	rmap_head->val = 0;
> -	return true;
> -}

I don't like moving the rmap zap functions into rmap.c because, as you
note in the commit description, they are really mmu.c functions. e.g.
it's odd to have kvm_zap_all_rmap_sptes() in rmap.c but not, say,
__rmap_clear_dirty().

I get your point though that kvm_zap_all_rmap_sptes() has to know
intimate details of the pte_list_desc structure. It would be nice to
keep those details isolated to rmap.c.
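
For reference, the "intimate details" here are the tagged-pointer
encoding of rmap_head->val, reconstructed from the code quoted above
(an illustrative note, not part of the patch):

	/*
	 * rmap_head->val encoding, as consumed by kvm_zap_all_rmap_sptes():
	 *   0           -> the rmap is empty
	 *   bit 0 clear -> val is a single u64 *sptep
	 *   bit 0 set   -> (val & ~1ul) points to a struct pte_list_desc
	 *                  chain, linked via ->more
	 */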

What about keeping the zap functions in mmu.c and just providing a
better API for kvm_zap_all_rmap_sptes() to process the rmap entries?

e.g.

mmu.c:

static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
				   struct kvm_rmap_head *rmap_head)
{
	struct rmap_iterator iter;
	bool flush = false;
	u64 *sptep;

	for_each_rmap_spte(rmap_head, &iter, sptep) {
		mmu_spte_clear_track_bits(kvm, sptep);
		flush = true;
	}

	pte_list_free_all(rmap_head);  // <-- implemented in rmap.c
	return flush;
}

This should be about as efficient as the current approach (the same
big-O complexity, at least) and keeps the pte_list_desc internals
isolated in rmap.c.
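
For illustration, pte_list_free_all() could look something like the
following in rmap.c (a sketch only: the name comes from the suggestion
above, and the body is assumed from the existing kvm_zap_all_rmap_sptes()
internals):

/* Free the pte_list_desc chain, if any, and reset the rmap head. */
void pte_list_free_all(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc, *next;

	/* Bit 0 set means val points to a pte_list_desc chain. */
	if (rmap_head->val & 1) {
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		for (; desc; desc = next) {
			next = desc->more;
			free_pte_list_desc(desc);
		}
	}

	/* An empty head or a single inline SPTE needs no freeing. */
	rmap_head->val = 0;
}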

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 88da2abc2375..12082314d82d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -512,7 +512,7 @@  static bool mmu_spte_update(u64 *sptep, u64 new_spte)
  * state bits, it is used to clear the last level sptep.
  * Returns the old PTE.
  */
-static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
+u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
 {
 	kvm_pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -855,42 +855,6 @@  gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return slot;
 }
 
-static void kvm_zap_one_rmap_spte(struct kvm *kvm,
-				  struct kvm_rmap_head *rmap_head, u64 *sptep)
-{
-	mmu_spte_clear_track_bits(kvm, sptep);
-	pte_list_remove(sptep, rmap_head);
-}
-
-/* Return true if at least one SPTE was zapped, false otherwise */
-static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
-				   struct kvm_rmap_head *rmap_head)
-{
-	struct pte_list_desc *desc, *next;
-	int i;
-
-	if (!rmap_head->val)
-		return false;
-
-	if (!(rmap_head->val & 1)) {
-		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
-		goto out;
-	}
-
-	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-
-	for (; desc; desc = next) {
-		for (i = 0; i < desc->spte_count; i++)
-			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
-		next = desc->more;
-		free_pte_list_desc(desc);
-	}
-out:
-	/* rmap_head is meaningless now, remember to reset it */
-	rmap_head->val = 0;
-	return true;
-}
-
 static void drop_spte(struct kvm *kvm, u64 *sptep)
 {
 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
@@ -1145,19 +1109,6 @@  static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			   const struct kvm_memory_slot *slot)
-{
-	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
-static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
-			 pte_t unused)
-{
-	return __kvm_zap_rmap(kvm, rmap_head, slot);
-}
-
 static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			     pte_t pte)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 3de703c2a5d4..a219c8e556e9 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -319,4 +319,5 @@  void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
 gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
+u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep);
 #endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/rmap.c b/arch/x86/kvm/mmu/rmap.c
index 91af5b32cffb..9cc4252aaabb 100644
--- a/arch/x86/kvm/mmu/rmap.c
+++ b/arch/x86/kvm/mmu/rmap.c
@@ -56,7 +56,7 @@  int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 	return count;
 }
 
-void free_pte_list_desc(struct pte_list_desc *pte_list_desc)
+static void free_pte_list_desc(struct pte_list_desc *pte_list_desc)
 {
 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
 }
@@ -283,3 +283,51 @@  void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
 
 	rmap_walk_init_level(iterator, iterator->level);
 }
+
+void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			   u64 *sptep)
+{
+	mmu_spte_clear_track_bits(kvm, sptep);
+	pte_list_remove(sptep, rmap_head);
+}
+
+/* Return true if at least one SPTE was zapped, false otherwise */
+bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+{
+	struct pte_list_desc *desc, *next;
+	int i;
+
+	if (!rmap_head->val)
+		return false;
+
+	if (!(rmap_head->val & 1)) {
+		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
+		goto out;
+	}
+
+	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+
+	for (; desc; desc = next) {
+		for (i = 0; i < desc->spte_count; i++)
+			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
+		next = desc->more;
+		free_pte_list_desc(desc);
+	}
+out:
+	/* rmap_head is meaningless now, remember to reset it */
+	rmap_head->val = 0;
+	return true;
+}
+
+bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+		    const struct kvm_memory_slot *slot)
+{
+	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
+}
+
+bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+		  struct kvm_memory_slot *slot, gfn_t gfn, int level,
+		  pte_t unused)
+{
+	return __kvm_zap_rmap(kvm, rmap_head, slot);
+}
diff --git a/arch/x86/kvm/mmu/rmap.h b/arch/x86/kvm/mmu/rmap.h
index dc4bf7e609ec..a9bf48494e1a 100644
--- a/arch/x86/kvm/mmu/rmap.h
+++ b/arch/x86/kvm/mmu/rmap.h
@@ -27,7 +27,6 @@  static struct kmem_cache *pte_list_desc_cache;
 
 int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 		 struct kvm_rmap_head *rmap_head);
-void free_pte_list_desc(struct pte_list_desc *pte_list_desc);
 void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head);
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
 
@@ -90,4 +89,12 @@  typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			       struct kvm_memory_slot *slot, gfn_t gfn,
 			       int level, pte_t pte);
 
+void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+			   u64 *sptep);
+bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+		    const struct kvm_memory_slot *slot);
+bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+		  struct kvm_memory_slot *slot, gfn_t gfn, int level,
+		  pte_t unused);
 #endif /* __KVM_X86_MMU_RMAP_H */