
[RFC,01/15] KVM: x86/mmu: Rename rmap_write_protect to kvm_vcpu_write_protect_gfn

Message ID: 20211119235759.1304274-2-dmatlack@google.com
State: New, archived
Series: KVM: x86/mmu: Eager Page Splitting for the TDP MMU

Commit Message

David Matlack Nov. 19, 2021, 11:57 p.m. UTC
rmap_write_protect is a poor name because we may not even touch the rmap
if the TDP MMU is in use. It is also confusing that rmap_write_protect
is not a simple wrapper around __rmap_write_protect, since that is the
typical flow for functions with double-underscore names.

Rename it to kvm_vcpu_write_protect_gfn to convey that we are
write-protecting a specific gfn in the context of a vCPU.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
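
The commit message leans on a kernel naming convention worth spelling out: a double-underscore function normally carries the core logic and assumes some precondition (typically a held lock), while the plain-named function is a simple wrapper that establishes that precondition and calls it. A minimal sketch of the convention, using hypothetical names rather than actual kernel code:

#include <linux/spinlock.h>

struct counter {
	spinlock_t lock;
	u64 value;
};

/* Core logic; by convention the caller must already hold c->lock. */
static void __counter_inc(struct counter *c)
{
	c->value++;
}

/* Simple wrapper: take the lock, then call the double-underscore core. */
static void counter_inc(struct counter *c)
{
	spin_lock(&c->lock);
	__counter_inc(c);
	spin_unlock(&c->lock);
}

rmap_write_protect did not follow this pattern: it was not a thin wrapper around __rmap_write_protect but a differently-scoped operation, which is part of what the rename addresses.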

Comments

Ben Gardon Nov. 22, 2021, 6:52 p.m. UTC | #1
On Fri, Nov 19, 2021 at 3:58 PM David Matlack <dmatlack@google.com> wrote:
>
> rmap_write_protect is a poor name because we may not even touch the rmap
> if the TDP MMU is in use. It is also confusing that rmap_write_protect
> is not a simple wrapper around __rmap_write_protect, since that is the
> typical flow for functions with double-underscore names.
>
> Rename it to kvm_vcpu_write_protect_gfn to convey that we are
> write-protecting a specific gfn in the context of a vCPU.
>
> No functional change intended.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Ben Gardon <bgardon@google.com>


Peter Xu Nov. 26, 2021, 12:18 p.m. UTC | #2
On Fri, Nov 19, 2021 at 11:57:45PM +0000, David Matlack wrote:
> rmap_write_protect is a poor name because we may not even touch the rmap
> if the TDP MMU is in use. It is also confusing that rmap_write_protect
> is not a simple wrapper around __rmap_write_protect, since that is the
> typical flow for functions with double-underscore names.
> 
> Rename it to kvm_vcpu_write_protect_gfn to convey that we are
> write-protecting a specific gfn in the context of a vCPU.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8f0035517450..16ffb571bc75 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1427,7 +1427,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	return write_protected;
 }
 
-static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
 
@@ -2026,7 +2026,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 		bool protected = false;
 
 		for_each_sp(pages, sp, parents, i)
-			protected |= rmap_write_protect(vcpu, sp->gfn);
+			protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
 
 		if (protected) {
 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
@@ -2153,7 +2153,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&sp->hash_link, sp_list);
 	if (!direct) {
 		account_shadowed(vcpu->kvm, sp);
-		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
+		if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
 	}
 	trace_kvm_mmu_get_page(sp, true);
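
For reference, the body of the renamed function is not shown in the diff context; sketched from the surrounding code in this file, it resolves the gfn to a memslot through the vCPU and delegates to the slot-level helper visible in the first hunk (treat the exact lines as approximate):

static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	/* Resolve the gfn against the vCPU's current memslots. */
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	/*
	 * kvm_mmu_slot_gfn_write_protect() walks the rmaps only if the
	 * memslots have them, and can also write-protect the gfn via the
	 * TDP MMU, which is why "rmap" in the old name was misleading.
	 */
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}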