[v1,05/13] KVM: x86/mmu: Move restore_acc_track_spte to spte.c

Message ID 20211213225918.672507-6-dmatlack@google.com
State New, archived
Series KVM: x86/mmu: Eager Page Splitting for the TDP MMU

Commit Message

David Matlack Dec. 13, 2021, 10:59 p.m. UTC
restore_acc_track_spte is purely an SPTE manipulation, making it a good
fit for spte.c. It is also needed in spte.c in a follow-up commit so we
can construct child SPTEs during large page splitting.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c  | 18 ------------------
 arch/x86/kvm/mmu/spte.c | 18 ++++++++++++++++++
 arch/x86/kvm/mmu/spte.h |  1 +
 3 files changed, 19 insertions(+), 18 deletions(-)
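
For context on the follow-up use: splitting a huge page means deriving the
child SPTEs from the huge SPTE, and an access-tracked huge SPTE must be
restored first so the children inherit the real permission bits rather than
the stashed ones. A minimal sketch of such a caller, with a hypothetical name
and simplified offset math (this is not the actual follow-up patch):

	static u64 make_child_spte_sketch(u64 huge_spte, int huge_level, int index)
	{
		int child_level = huge_level - 1;
		u64 child_spte;

		/* Recover the real R/X bits before copying them to children. */
		if (is_access_track_spte(huge_spte))
			huge_spte = restore_acc_track_spte(huge_spte);

		child_spte = huge_spte;

		/* Offset the PFN to the index-th child page at the lower level. */
		child_spte |= (u64)(index * KVM_PAGES_PER_HPAGE(child_level)) << PAGE_SHIFT;

		/* 4K children are leaf PTEs; the huge-page bit no longer applies. */
		if (child_level == PG_LEVEL_4K)
			child_spte &= ~PT_PAGE_SIZE_MASK;

		return child_spte;
	}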

Comments

Peter Xu Jan. 4, 2022, 10:33 a.m. UTC | #1
On Mon, Dec 13, 2021 at 10:59:10PM +0000, David Matlack wrote:
> restore_acc_track_spte is purely an SPTE manipulation, making it a good
> fit for spte.c. It is also needed in spte.c in a follow-up commit so we
> can construct child SPTEs during large page splitting.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>
> Reviewed-by: Ben Gardon <bgardon@google.com>

Reviewed-by: Peter Xu <peterx@redhat.com>
Sean Christopherson Jan. 6, 2022, 8:27 p.m. UTC | #2
On Mon, Dec 13, 2021, David Matlack wrote:
> restore_acc_track_spte is purely an SPTE manipulation, making it a good
> fit for spte.c. It is also needed in spte.c in a follow-up commit so we
> can construct child SPTEs during large page splitting.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>
> Reviewed-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c  | 18 ------------------
>  arch/x86/kvm/mmu/spte.c | 18 ++++++++++++++++++
>  arch/x86/kvm/mmu/spte.h |  1 +
>  3 files changed, 19 insertions(+), 18 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 8b702f2b6a70..3c2cb4dd1f11 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -646,24 +646,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
>  	return __get_spte_lockless(sptep);
>  }
>  
> -/* Restore an acc-track PTE back to a regular PTE */
> -static u64 restore_acc_track_spte(u64 spte)
> -{
> -	u64 new_spte = spte;
> -	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
> -			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
> -
> -	WARN_ON_ONCE(spte_ad_enabled(spte));
> -	WARN_ON_ONCE(!is_access_track_spte(spte));
> -
> -	new_spte &= ~shadow_acc_track_mask;
> -	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
> -		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
> -	new_spte |= saved_bits;
> -
> -	return new_spte;
> -}
> -
>  /* Returns the Accessed status of the PTE and resets it at the same time. */
>  static bool mmu_spte_age(u64 *sptep)
>  {
> diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
> index 8a7b03207762..fd34ae5d6940 100644
> --- a/arch/x86/kvm/mmu/spte.c
> +++ b/arch/x86/kvm/mmu/spte.c
> @@ -268,6 +268,24 @@ u64 mark_spte_for_access_track(u64 spte)
>  	return spte;
>  }
>  
> +/* Restore an acc-track PTE back to a regular PTE */
> +u64 restore_acc_track_spte(u64 spte)
> +{
> +	u64 new_spte = spte;
> +	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
> +			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

Obviously not your code, but this could be:

	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) &
			 SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	WARN_ON_ONCE(spte_ad_enabled(spte));
	WARN_ON_ONCE(!is_access_track_spte(spte));

	spte &= ~shadow_acc_track_mask;
	spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	spte |= saved_bits;

	return spte;

which is really just an excuse to move the ampersand up a line :-)

And looking at the two callers, the WARNs are rather silly.  The spte_ad_enabled()
WARN is especially pointless because that's also checked by is_access_track_spte().
I like paranoid WARNs as much as anyone, but I don't see why this code warrants
extra checking relative to the other SPTE helpers that have more subtle requirements.
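
For reference, the redundancy is visible in is_access_track_spte() itself,
which at this point in the tree reads roughly as follows (paraphrased from
spte.h). If spte_ad_enabled() is true, !is_access_track_spte() is necessarily
true as well, so the second WARN already covers the first:

	static inline bool is_access_track_spte(u64 spte)
	{
		/* Only A/D-disabled SPTEs can be access-tracked. */
		return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) != 0;
	}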

At that point, maybe make this an inline helper?

  static inline u64 restore_acc_track_spte(u64 spte)
  {
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) &
			 SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	spte &= ~shadow_acc_track_mask;
	spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	spte |= saved_bits;

	return spte;
  }

> +	WARN_ON_ONCE(spte_ad_enabled(spte));
> +	WARN_ON_ONCE(!is_access_track_spte(spte));
> +
> +	new_spte &= ~shadow_acc_track_mask;
> +	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
> +		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
> +	new_spte |= saved_bits;
> +
> +	return new_spte;
> +}
> +
>  void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
>  {
>  	BUG_ON((u64)(unsigned)access_mask != access_mask);
> diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
> index a4af2a42695c..9b0c7b27f23f 100644
> --- a/arch/x86/kvm/mmu/spte.h
> +++ b/arch/x86/kvm/mmu/spte.h
> @@ -337,6 +337,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
>  u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
>  u64 mark_spte_for_access_track(u64 spte);
> +u64 restore_acc_track_spte(u64 spte);
>  u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
>  
>  void kvm_mmu_reset_all_pte_masks(void);
> -- 
> 2.34.1.173.g76aa8bc2d0-goog
>
David Matlack Jan. 6, 2022, 10:58 p.m. UTC | #3
On Thu, Jan 6, 2022 at 12:27 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Dec 13, 2021, David Matlack wrote:
> > restore_acc_track_spte is purely an SPTE manipulation, making it a good
> > fit for spte.c. It is also needed in spte.c in a follow-up commit so we
> > can construct child SPTEs during large page splitting.
> >
> > No functional change intended.
> >
> > Signed-off-by: David Matlack <dmatlack@google.com>
> > Reviewed-by: Ben Gardon <bgardon@google.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c  | 18 ------------------
> >  arch/x86/kvm/mmu/spte.c | 18 ++++++++++++++++++
> >  arch/x86/kvm/mmu/spte.h |  1 +
> >  3 files changed, 19 insertions(+), 18 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 8b702f2b6a70..3c2cb4dd1f11 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -646,24 +646,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
> >       return __get_spte_lockless(sptep);
> >  }
> >
> > -/* Restore an acc-track PTE back to a regular PTE */
> > -static u64 restore_acc_track_spte(u64 spte)
> > -{
> > -     u64 new_spte = spte;
> > -     u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
> > -                      & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
> > -
> > -     WARN_ON_ONCE(spte_ad_enabled(spte));
> > -     WARN_ON_ONCE(!is_access_track_spte(spte));
> > -
> > -     new_spte &= ~shadow_acc_track_mask;
> > -     new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
> > -                   SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
> > -     new_spte |= saved_bits;
> > -
> > -     return new_spte;
> > -}
> > -
> >  /* Returns the Accessed status of the PTE and resets it at the same time. */
> >  static bool mmu_spte_age(u64 *sptep)
> >  {
> > diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
> > index 8a7b03207762..fd34ae5d6940 100644
> > --- a/arch/x86/kvm/mmu/spte.c
> > +++ b/arch/x86/kvm/mmu/spte.c
> > @@ -268,6 +268,24 @@ u64 mark_spte_for_access_track(u64 spte)
> >       return spte;
> >  }
> >
> > +/* Restore an acc-track PTE back to a regular PTE */
> > +u64 restore_acc_track_spte(u64 spte)
> > +{
> > +     u64 new_spte = spte;
> > +     u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
> > +                      & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
>
> Obviously not your code, but this could be:
>
>         u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) &
>                          SHADOW_ACC_TRACK_SAVED_BITS_MASK;
>
>         WARN_ON_ONCE(spte_ad_enabled(spte));
>         WARN_ON_ONCE(!is_access_track_spte(spte));
>
>         spte &= ~shadow_acc_track_mask;
>         spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
>                   SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
>         spte |= saved_bits;
>
>         return spte;
>
> which is really just an excuse to move the ampersand up a line :-)
>
> And looking at the two callers, the WARNs are rather silly.  The spte_ad_enabled()
> WARN is especially pointless because that's also checked by is_access_track_spte().
> I like paranoid WARNs as much as anyone, but I don't see why this code warrants
> extra checking relative to the other SPTE helpers that have more subtle requirements.
>
> At that point, maybe make this an inline helper?
>
>   static inline u64 restore_acc_track_spte(u64 spte)
>   {
>         u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) &
>                          SHADOW_ACC_TRACK_SAVED_BITS_MASK;
>
>         spte &= ~shadow_acc_track_mask;
>         spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
>                   SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
>         spte |= saved_bits;
>
>         return spte;
>   }

That all sounds reasonable. I'll add some patches in the next
version to include these cleanups.
>
> > +     WARN_ON_ONCE(spte_ad_enabled(spte));
> > +     WARN_ON_ONCE(!is_access_track_spte(spte));
> > +
> > +     new_spte &= ~shadow_acc_track_mask;
> > +     new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
> > +                   SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
> > +     new_spte |= saved_bits;
> > +
> > +     return new_spte;
> > +}
> > +
> >  void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
> >  {
> >       BUG_ON((u64)(unsigned)access_mask != access_mask);
> > diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
> > index a4af2a42695c..9b0c7b27f23f 100644
> > --- a/arch/x86/kvm/mmu/spte.h
> > +++ b/arch/x86/kvm/mmu/spte.h
> > @@ -337,6 +337,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> >  u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
> >  u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
> >  u64 mark_spte_for_access_track(u64 spte);
> > +u64 restore_acc_track_spte(u64 spte);
> >  u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
> >
> >  void kvm_mmu_reset_all_pte_masks(void);
> > --
> > 2.34.1.173.g76aa8bc2d0-goog
> >

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8b702f2b6a70..3c2cb4dd1f11 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -646,24 +646,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
 	return __get_spte_lockless(sptep);
 }
 
-/* Restore an acc-track PTE back to a regular PTE */
-static u64 restore_acc_track_spte(u64 spte)
-{
-	u64 new_spte = spte;
-	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
-			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
-
-	WARN_ON_ONCE(spte_ad_enabled(spte));
-	WARN_ON_ONCE(!is_access_track_spte(spte));
-
-	new_spte &= ~shadow_acc_track_mask;
-	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
-		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
-	new_spte |= saved_bits;
-
-	return new_spte;
-}
-
 /* Returns the Accessed status of the PTE and resets it at the same time. */
 static bool mmu_spte_age(u64 *sptep)
 {
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 8a7b03207762..fd34ae5d6940 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -268,6 +268,24 @@ u64 mark_spte_for_access_track(u64 spte)
 	return spte;
 }
 
+/* Restore an acc-track PTE back to a regular PTE */
+u64 restore_acc_track_spte(u64 spte)
+{
+	u64 new_spte = spte;
+	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
+			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
+
+	WARN_ON_ONCE(spte_ad_enabled(spte));
+	WARN_ON_ONCE(!is_access_track_spte(spte));
+
+	new_spte &= ~shadow_acc_track_mask;
+	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
+		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
+	new_spte |= saved_bits;
+
+	return new_spte;
+}
+
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index a4af2a42695c..9b0c7b27f23f 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -337,6 +337,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
 u64 mark_spte_for_access_track(u64 spte);
+u64 restore_acc_track_spte(u64 spte);
 u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
 
 void kvm_mmu_reset_all_pte_masks(void);
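
As a usage note on the mechanism being moved: mark_spte_for_access_track()
stashes the R and X permission bits in a high "saved bits" field and clears
the access permissions so the next access faults, and restore_acc_track_spte()
is its inverse, except that the W bit is deliberately never saved, so writes
still fault and dirty tracking keeps working. The following is a
self-contained userspace sketch with made-up mask values; the real definitions
are SHADOW_ACC_TRACK_SAVED_BITS_MASK/SHIFT and shadow_acc_track_mask in
spte.h:

	#include <stdint.h>
	#include <assert.h>

	/* Illustrative stand-ins; not the real spte.h values. */
	#define ACC_TRACK_SAVED_SHIFT	54
	#define ACC_TRACK_SAVED_MASK	0x5ULL	/* R and X; W intentionally excluded */
	#define ACC_TRACK_MASK		0x7ULL	/* R/W/X, cleared while access-tracked */

	static uint64_t mark_for_access_track(uint64_t spte)
	{
		/* Stash R/X high, then clear R/W/X so the next access faults. */
		spte |= (spte & ACC_TRACK_SAVED_MASK) << ACC_TRACK_SAVED_SHIFT;
		return spte & ~ACC_TRACK_MASK;
	}

	static uint64_t restore_acc_track(uint64_t spte)
	{
		uint64_t saved = (spte >> ACC_TRACK_SAVED_SHIFT) &
				 ACC_TRACK_SAVED_MASK;

		/* Clear the saved-bits field and put R/X back in place. */
		spte &= ~(ACC_TRACK_SAVED_MASK << ACC_TRACK_SAVED_SHIFT);
		return (spte & ~ACC_TRACK_MASK) | saved;
	}

	int main(void)
	{
		uint64_t spte = 0xabc007;	/* PFN-ish bits plus R/W/X */
		uint64_t tracked = mark_for_access_track(spte);

		assert((tracked & ACC_TRACK_MASK) == 0);	/* faults on access */
		/* R/X return; W comes back only via a write fault. */
		assert(restore_acc_track(tracked) == (spte & ~0x2ULL));
		return 0;
	}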