
[12/12] KVM: X86/MMU: Rename struct kvm_mmu_pages to struct kvm_mmu_page_vec

Message ID 20220605064342.309219-13-jiangshanlai@gmail.com (mailing list archive)
State New, archived
Series KVM: X86/MMU: Simplify mmu_unsync_walk()

Commit Message

Lai Jiangshan June 5, 2022, 6:43 a.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

It is implemented as a fixed-capacity vector, and the variables holding it are named pvec.

Rename it to kvm_mmu_page_vec to better describe what it is.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

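For context, struct kvm_mmu_pages is a small fixed-capacity vector of shadow-page pointers that mmu_unsync_walk_and_clear() fills and the callers then iterate. Below is a minimal standalone sketch of that shape, with a stubbed-out struct kvm_mmu_page and a simplified add helper; the real definitions live in arch/x86/kvm/mmu/mmu.c and appear in the diff further down.

#define KVM_PAGE_ARRAY_NR 16

/* Stub standing in for the real struct kvm_mmu_page. */
struct kvm_mmu_page {
	unsigned long gfn;
};

/* The structure being renamed: a fixed-capacity vector of page pointers. */
struct kvm_mmu_page_vec {
	struct kvm_mmu_page *sp[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

/* Simplified add helper: append a page, or fail when the vector is full.
 * The kernel's mmu_pages_add() has a different contract; this only
 * illustrates the vector shape. */
static int page_vec_add(struct kvm_mmu_page_vec *pvec, struct kvm_mmu_page *sp)
{
	if (pvec->nr >= KVM_PAGE_ARRAY_NR)
		return -1;
	pvec->sp[pvec->nr++] = sp;
	return 0;
}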
Comments

Sean Christopherson July 19, 2022, 8:45 p.m. UTC | #1
On Sun, Jun 05, 2022, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> It is implemented as a fixed-capacity vector, and the variables holding it are named pvec.

Please define "it" in the changelog before referencing "it".  Avoiding dependencies
on the shortlog is trivial and really does help as it avoids having to jump back
to see what "it" refers to.

> Rename it to kvm_mmu_page_vec to better describe what it is.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7cfc4bc89f60..64e0d155068c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1742,12 +1742,12 @@  static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 
 #define KVM_PAGE_ARRAY_NR 16
 
-struct kvm_mmu_pages {
+struct kvm_mmu_page_vec {
 	struct kvm_mmu_page *sp[KVM_PAGE_ARRAY_NR];
 	unsigned int nr;
 };
 
-static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp)
+static int mmu_pages_add(struct kvm_mmu_page_vec *pvec, struct kvm_mmu_page *sp)
 {
 	int i;
 
@@ -1768,7 +1768,7 @@  static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
 }
 
 static int __mmu_unsync_walk_and_clear(struct kvm_mmu_page *sp,
-			   struct kvm_mmu_pages *pvec)
+			   struct kvm_mmu_page_vec *pvec)
 {
 	int i, ret, nr_unsync_leaf = 0;
 
@@ -1808,7 +1808,7 @@  static int __mmu_unsync_walk_and_clear(struct kvm_mmu_page *sp,
 }
 
 static int mmu_unsync_walk_and_clear(struct kvm_mmu_page *sp,
-			   struct kvm_mmu_pages *pvec)
+			   struct kvm_mmu_page_vec *pvec)
 {
 	pvec->nr = 0;
 	if (!sp->unsync_children)
@@ -1885,7 +1885,7 @@  static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
-#define for_each_sp(pvec, sp, i)					\
+#define page_vec_for_each_sp(pvec, sp, i)					\
 		for (i = 0; i < pvec.nr && ({ sp = pvec.sp[i]; 1;}); i++)
 
 static int mmu_sync_children(struct kvm_vcpu *vcpu,
@@ -1893,14 +1893,14 @@  static int mmu_sync_children(struct kvm_vcpu *vcpu,
 {
 	int i;
 	struct kvm_mmu_page *sp;
-	struct kvm_mmu_pages pages;
+	struct kvm_mmu_page_vec pvec;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	while (mmu_unsync_walk_and_clear(parent, &pages)) {
+	while (mmu_unsync_walk_and_clear(parent, &pvec)) {
 		bool protected = false;
 
-		for_each_sp(pages, sp, i)
+		page_vec_for_each_sp(pvec, sp, i)
 			protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
 
 		if (protected) {
@@ -1908,7 +1908,7 @@  static int mmu_sync_children(struct kvm_vcpu *vcpu,
 			flush = false;
 		}
 
-		for_each_sp(pages, sp, i) {
+		page_vec_for_each_sp(pvec, sp, i) {
 			kvm_mmu_page_clear_unsync(vcpu->kvm, sp);
 			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
 		}
@@ -2208,15 +2208,15 @@  static int mmu_zap_unsync_children(struct kvm *kvm,
 				   struct list_head *invalid_list)
 {
 	int i, zapped = 0;
-	struct kvm_mmu_pages pages;
+	struct kvm_mmu_page_vec pvec;
 
 	if (parent->role.level == PG_LEVEL_4K)
 		return 0;
 
-	while (mmu_unsync_walk_and_clear(parent, &pages)) {
+	while (mmu_unsync_walk_and_clear(parent, &pvec)) {
 		struct kvm_mmu_page *sp;
 
-		for_each_sp(pages, sp, i) {
+		page_vec_for_each_sp(pvec, sp, i) {
 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 			zapped++;
 		}