[02/12] KVM: X86/MMU: Rename kvm_unlink_unsync_page() to kvm_mmu_page_clear_unsync()

Message ID 20220605064342.309219-3-jiangshanlai@gmail.com (mailing list archive)
State New, archived
Series KVM: X86/MMU: Simplify mmu_unsync_walk()

Commit Message

Lai Jiangshan June 5, 2022, 6:43 a.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

"Unlink" is ambiguous: the function does not disconnect any link.

Use "clear" instead, which is an antonym of the "mark" in mark_unsync()
and kvm_mmu_mark_parents_unsync().

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
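
For context, the helper being renamed only flips the shadow page's unsync
state; it never detaches the page from any list, which is what makes
"unlink" misleading. Below is a minimal sketch of the function as it looks
after this patch: the WARN_ON() and tracepoint come from the hunk in the
diff, while the flag clear and stat update are assumptions based on
mainline KVM code rather than lines visible in this patch.

static void kvm_mmu_page_clear_unsync(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/* Callers only reach here for pages currently marked unsync. */
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	/* Assumed body (per mainline KVM): drop the mark and the stat counter. */
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

Nothing here touches parent_ptes or any other linkage, so "clear" (or the
"unmark" suggested in the review below) describes the operation more
precisely than "unlink".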

Comments

Sean Christopherson July 14, 2022, 10:10 p.m. UTC | #1
On Sun, Jun 05, 2022, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> "Unlink" is ambiguous: the function does not disconnect any link.
> 
> Use "clear" instead, which is an antonym of the "mark" in mark_unsync()
> and kvm_mmu_mark_parents_unsync().

Hmm, but "clearing a page" is a common operation.  Might not be proper English,
but my vote is to use "unmark".  KVM already uses link+unlink, account+unaccount,
etc., so mark+unmark should be intuitive for readers.

> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index f61416818116..c20981dfc4fd 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1825,7 +1825,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
>  	return __mmu_unsync_walk(sp, pvec);
>  }
>  
> -static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
> +static void kvm_mmu_page_clear_unsync(struct kvm *kvm, struct kvm_mmu_page *sp)
>  {
>  	WARN_ON(!sp->unsync);
>  	trace_kvm_mmu_sync_page(sp);
> @@ -1987,7 +1987,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
>  		}
>  
>  		for_each_sp(pages, sp, parents, i) {
> -			kvm_unlink_unsync_page(vcpu->kvm, sp);
> +			kvm_mmu_page_clear_unsync(vcpu->kvm, sp);
>  			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
>  			mmu_pages_clear_parents(&parents);
>  		}
> @@ -2326,7 +2326,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
>  		unaccount_shadowed(kvm, sp);
>  
>  	if (sp->unsync)
> -		kvm_unlink_unsync_page(kvm, sp);
> +		kvm_mmu_page_clear_unsync(kvm, sp);
>  	if (!sp->root_count) {
>  		/* Count self */
>  		(*nr_zapped)++;
> -- 
> 2.19.1.6.gb485710b
>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f61416818116..c20981dfc4fd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1825,7 +1825,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 	return __mmu_unsync_walk(sp, pvec);
 }
 
-static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void kvm_mmu_page_clear_unsync(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	WARN_ON(!sp->unsync);
 	trace_kvm_mmu_sync_page(sp);
@@ -1987,7 +1987,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 		}
 
 		for_each_sp(pages, sp, parents, i) {
-			kvm_unlink_unsync_page(vcpu->kvm, sp);
+			kvm_mmu_page_clear_unsync(vcpu->kvm, sp);
 			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
 			mmu_pages_clear_parents(&parents);
 		}
@@ -2326,7 +2326,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
-		kvm_unlink_unsync_page(kvm, sp);
+		kvm_mmu_page_clear_unsync(kvm, sp);
 	if (!sp->root_count) {
 		/* Count self */
 		(*nr_zapped)++;