[09/23] KVM: PPC: Book3S HV: Nested: Improve comments and naming of nest rmap functions

Message ID 20190826062109.7573-10-sjitindarsingh@gmail.com (mailing list archive)
State New, archived
Series KVM: PPC: Book3S HV: Support for nested HPT guests

Commit Message

Suraj Jitindar Singh Aug. 26, 2019, 6:20 a.m. UTC
The nested rmap entries are used to track the nested guest pages which map a
given guest page, so that this information can be retrieved from the guest
memslot.
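
For context, here is a minimal sketch of how such an rmap list is walked,
mirroring the list handling visible in the diff below. The helper name is
made up for illustration only, and the constants not shown in the diff
(RMAP_NESTED_LPID_MASK/SHIFT) are taken from the existing kvm_book3s_64.h,
so double-check them there.

/*
 * Illustrative only, not part of this patch: each memslot rmap slot is
 * (re)used as an llist_head of rmap_nested entries, one per nested guest
 * pte which maps the corresponding L1 guest page.  Each entry packs the
 * nested lpid and the guest physical address of that shadow mapping.
 * Walked with kvm->mmu_lock held.
 */
static void example_walk_nest_rmap(unsigned long *rmapp)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		unsigned long lpid = (rmap & RMAP_NESTED_LPID_MASK) >>
				     RMAP_NESTED_LPID_SHIFT;
		unsigned long gpa = rmap & RMAP_NESTED_GPA_MASK;

		pr_debug("nested lpid %lu maps this page at gpa 0x%lx\n",
			 lpid, gpa);
	}
}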

Improve the naming of some of these functions so that it's clearer what they
do: the functions with "remove" in the name remove the rmap _and_ perform an
invalidation, so rename them to "invalidate" to reflect this.

kvmhv_insert_nest_rmap() takes a kvm struct argument which is unused, so
remove it.

Additionally, improve the function comments and document which locks must be
held, for clarity.

No functional change.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_book3s_64.h |  4 +--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |  8 +++---
 arch/powerpc/kvm/book3s_hv_nested.c      | 49 +++++++++++++++++++++++---------
 3 files changed, 42 insertions(+), 19 deletions(-)

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index bb7c8cc77f1a..bec78f15e2f5 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -624,12 +624,12 @@  extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 			     unsigned long gpa, unsigned int level,
 			     unsigned long mmu_seq, unsigned int lpid,
 			     unsigned long *rmapp, struct rmap_nested **n_rmap);
-extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+extern void kvmhv_insert_nest_rmap(unsigned long *rmapp,
 				   struct rmap_nested **n_rmap);
 extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
 					   unsigned long clr, unsigned long set,
 					   unsigned long hpa, unsigned long nbytes);
-extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+extern void kvmhv_invalidate_nest_rmap_range(struct kvm *kvm,
 				const struct kvm_memory_slot *memslot,
 				unsigned long gpa, unsigned long hpa,
 				unsigned long nbytes);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 310d8dde9a48..48b844d33dc9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -405,7 +405,7 @@  void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
 
 	gpa &= ~(page_size - 1);
 	hpa = old & PTE_RPN_MASK;
-	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
+	kvmhv_invalidate_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
 
 	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
 		kvmppc_update_dirty_map(memslot, gfn, page_size);
@@ -643,7 +643,7 @@  int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		}
 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
 		if (rmapp && n_rmap)
-			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
+			kvmhv_insert_nest_rmap(rmapp, n_rmap);
 		ret = 0;
 		goto out_unlock;
 	}
@@ -695,7 +695,7 @@  int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		}
 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
 		if (rmapp && n_rmap)
-			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
+			kvmhv_insert_nest_rmap(rmapp, n_rmap);
 		ret = 0;
 		goto out_unlock;
 	}
@@ -721,7 +721,7 @@  int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 	}
 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
 	if (rmapp && n_rmap)
-		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
+		kvmhv_insert_nest_rmap(rmapp, n_rmap);
 	ret = 0;
 
  out_unlock:
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 68d492e8861e..555b45a35fec 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -776,8 +776,8 @@  static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
 				       RMAP_NESTED_GPA_MASK));
 }
 
-void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
-			    struct rmap_nested **n_rmap)
+/* called with kvm->mmu_lock held */
+void kvmhv_insert_nest_rmap(unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
 	struct rmap_nested *cursor;
@@ -808,6 +808,11 @@  void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
 	*n_rmap = NULL;
 }
 
+/*
+ * called with kvm->mmu_lock held
+ * Given a single rmap entry, update the rc bits in the corresponding shadow
+ * pte. Should only be used to clear rc bits.
+ */
 static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 				      unsigned long clr, unsigned long set,
 				      unsigned long hpa, unsigned long mask)
@@ -838,8 +843,10 @@  static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 }
 
 /*
+ * called with kvm->mmu_lock held
  * For a given list of rmap entries, update the rc bits in all ptes in shadow
  * page tables for nested guests which are referenced by the rmap list.
+ * Should only be used to clear rc bits.
  */
 void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
 				    unsigned long clr, unsigned long set,
@@ -859,8 +866,12 @@  void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
 		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
 }
 
-static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
-				   unsigned long hpa, unsigned long mask)
+/*
+ * called with kvm->mmu_lock held
+ * Given a single rmap entry, invalidate the corresponding shadow pte.
+ */
+static void kvmhv_invalidate_nest_rmap(struct kvm *kvm, u64 n_rmap,
+				       unsigned long hpa, unsigned long mask)
 {
 	struct kvm_nested_guest *gp;
 	unsigned long gpa;
@@ -880,24 +891,35 @@  static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
 }
 
-static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
-					unsigned long hpa, unsigned long mask)
+/*
+ * called with kvm->mmu_lock held
+ * For a given list of rmap entries, invalidate the corresponding shadow ptes
+ * for nested guests which are referenced by the rmap list.
+ */
+static void kvmhv_invalidate_nest_rmap_list(struct kvm *kvm,
+					    unsigned long *rmapp,
+					    unsigned long hpa,
+					    unsigned long mask)
 {
 	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
 	struct rmap_nested *cursor;
 	unsigned long rmap;
 
 	for_each_nest_rmap_safe(cursor, entry, &rmap) {
-		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
+		kvmhv_invalidate_nest_rmap(kvm, rmap, hpa, mask);
 		kfree(cursor);
 	}
 }
 
-/* called with kvm->mmu_lock held */
-void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
-				  const struct kvm_memory_slot *memslot,
-				  unsigned long gpa, unsigned long hpa,
-				  unsigned long nbytes)
+/*
+ * called with kvm->mmu_lock held
+ * For a given memslot, invalidate all of the rmap entries which fall into the
+ * given range.
+ */
+void kvmhv_invalidate_nest_rmap_range(struct kvm *kvm,
+				      const struct kvm_memory_slot *memslot,
+				      unsigned long gpa, unsigned long hpa,
+				      unsigned long nbytes)
 {
 	unsigned long gfn, end_gfn;
 	unsigned long addr_mask;
@@ -912,10 +934,11 @@  void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
 
 	for (; gfn < end_gfn; gfn++) {
 		unsigned long *rmap = &memslot->arch.rmap[gfn];
-		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
+		kvmhv_invalidate_nest_rmap_list(kvm, rmap, hpa, addr_mask);
 	}
 }
 
+/* Free the nest rmap structures for a given memslot */
 static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
 {
 	unsigned long page;