[17/23] KVM: x86/mmu: Pass bool flush parameter to drop_large_spte()

Message ID 20220203010051.2813563-18-dmatlack@google.com (mailing list archive)
State New, archived
Series Extend Eager Page Splitting to the shadow MMU

Commit Message

David Matlack Feb. 3, 2022, 1 a.m. UTC
drop_large_spte() drops a large SPTE if it exists and then flushes TLBs.
Its helper function, __drop_large_spte(), does the drop without the
flush. This difference is not obvious from the name.

To make the code more readable, pass an explicit flush parameter. Also
replace the vCPU pointer with a KVM pointer so we can get rid of the
double-underscore helper function.

This is also in preparation for a future commit that will conditionally
flush after dropping a large SPTE.
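
For reference, the new signature, taken verbatim from the diff below, is:

  static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush);

All existing callers pass flush=true, preserving current behavior.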

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c         | 25 +++++++++++--------------
 arch/x86/kvm/mmu/paging_tmpl.h |  4 ++--
 2 files changed, 13 insertions(+), 16 deletions(-)

Comments

Ben Gardon Feb. 28, 2022, 8:47 p.m. UTC | #1
On Wed, Feb 2, 2022 at 5:02 PM David Matlack <dmatlack@google.com> wrote:
> [...]
> -static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
> +static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)

Since there are no callers of __drop_large_spte, I'd be inclined to
hold off on adding the flush parameter in this commit and just add it
when it's needed, or better yet after you add the new user with the
conditional flush so that there's a commit explaining why it's safe to
not always flush in that case.

David Matlack March 3, 2022, 7:52 p.m. UTC | #2
On Mon, Feb 28, 2022 at 12:47 PM Ben Gardon <bgardon@google.com> wrote:
>
> On Wed, Feb 2, 2022 at 5:02 PM David Matlack <dmatlack@google.com> wrote:
> > [...]
> > -static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
> > +static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
>
> Since there are no callers of __drop_large_spte, I'd be inclined to
> hold off on adding the flush parameter in this commit and just add it
> when it's needed,

The same argument about waiting until there's a user could be said
about "KVM: x86/mmu: Pass access information to
make_huge_page_split_spte()". I agree with this advice when the future
user is entirely theoretical or some future series. But when the
future user is literally the next commit in the series, I think it's
ok to do things this way since it distributes the net diff more evenly
among patches, which eases reviewing.

But, you've got me thinking and I think I want to change this commit
slightly: I'll keep __drop_large_spte() but push all the implementation
into it and add a bool flush parameter there. That way we don't have
to change all the call sites of drop_large_spte() in this commit. The
implementation of drop_large_spte() will just be
__drop_large_spte(..., true). And the next commit can call
__drop_large_spte(..., false) with a comment.
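
Something like this (untested, just to illustrate):

	/* Does the drop; flushes remote TLBs only if the caller asks. */
	static void __drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
	{
		struct kvm_mmu_page *sp;

		if (!is_large_pte(*sptep))
			return;

		sp = sptep_to_sp(sptep);
		WARN_ON(sp->role.level == PG_LEVEL_4K);

		drop_spte(kvm, sptep);

		if (flush)
			kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
	}

	/* Existing call sites keep passing a vCPU and always flush. */
	static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
	{
		__drop_large_spte(vcpu->kvm, sptep, true);
	}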


Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 99ad7cc8683f..2d47a54e62a5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1162,23 +1162,20 @@  static void drop_spte(struct kvm *kvm, u64 *sptep)
 }
 
 
-static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
+static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
 {
-	if (is_large_pte(*sptep)) {
-		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
-		drop_spte(kvm, sptep);
-		return true;
-	}
+	struct kvm_mmu_page *sp;
 
-	return false;
-}
+	if (!is_large_pte(*sptep))
+		return;
 
-static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
-{
-	if (__drop_large_spte(vcpu->kvm, sptep)) {
-		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
+	sp = sptep_to_sp(sptep);
+	WARN_ON(sp->role.level == PG_LEVEL_4K);
+
+	drop_spte(kvm, sptep);
 
-		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+	if (flush) {
+		kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
 			KVM_PAGES_PER_HPAGE(sp->role.level));
 	}
 }
@@ -3051,7 +3048,7 @@  static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		if (it.level == fault->goal_level)
 			break;
 
-		drop_large_spte(vcpu, it.sptep);
+		drop_large_spte(vcpu->kvm, it.sptep, true);
 		if (is_shadow_present_pte(*it.sptep))
 			continue;
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 703dfb062bf0..ba61de29f2e5 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -677,7 +677,7 @@  static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		gfn_t table_gfn;
 
 		clear_sp_write_flooding_count(it.sptep);
-		drop_large_spte(vcpu, it.sptep);
+		drop_large_spte(vcpu->kvm, it.sptep, true);
 
 		sp = NULL;
 		if (!is_shadow_present_pte(*it.sptep)) {
@@ -739,7 +739,7 @@  static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
 		validate_direct_spte(vcpu, it.sptep, direct_access);
 
-		drop_large_spte(vcpu, it.sptep);
+		drop_large_spte(vcpu->kvm, it.sptep, true);
 
 		if (!is_shadow_present_pte(*it.sptep)) {
 			sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,