[v3,2/7] KVM: x86/mmu: Atomically clear SPTE dirty state in the clear-dirty-log flow

Message ID 20230211014626.3659152-3-vipinsh@google.com
State New, archived
Series Optimize clear dirty log

Commit Message

Vipin Sharma Feb. 11, 2023, 1:46 a.m. UTC
Do an atomic-AND to clear the dirty state of SPTEs. Optimize the
clear-dirty-log flow by skipping __handle_changed_spte() and calling
kvm_set_pfn_dirty() directly instead.

An atomic-AND fetches the latest SPTE value, clears only its dirty
state, and installs the new value in a single operation. This
optimization avoids the unnecessary checks that __handle_changed_spte()
would otherwise perform.
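
As a minimal illustration (plain C11 atomics, not kernel code; DIRTY_BIT
is a stand-in for shadow_dirty_mask), the fetch-and pattern returns the
pre-clear value, which tells the caller whether the dirty bit was
actually set:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DIRTY_BIT (1ULL << 9)	/* stand-in for shadow_dirty_mask */

	int main(void)
	{
		/* A fake SPTE with the dirty bit set. */
		_Atomic uint64_t spte = DIRTY_BIT | 0x1000;

		/* Clear the dirty bit and fetch the pre-clear value in one step. */
		uint64_t old_spte = atomic_fetch_and(&spte, ~DIRTY_BIT);

		if (old_spte & DIRTY_BIT)
			printf("SPTE was dirty; caller would invoke kvm_set_pfn_dirty()\n");
		return 0;
	}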

With the removal of tdp_mmu_set_spte_no_dirty_log(), the
"record_dirty_log" parameter of __tdp_mmu_set_spte() is now obsolete;
every remaining caller sets it to true. This dead code will be cleaned
up in future commits.
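
For example, tdp_mmu_set_spte_no_acc_track() (shown in the diff below)
now always passes a constant true for the final record_dirty_log
argument:

	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true /* record_dirty_log */);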

Tested on a VM (160 vCPUs, 160 GB memory) and found that the performance
of the clear dirty log stage of dirty_log_perf_test improved by ~40%.

Before optimization:
--------------------
Iteration 1 clear dirty log time: 3.638543593s
Iteration 2 clear dirty log time: 3.145032742s
Iteration 3 clear dirty log time: 3.142340358s
Clear dirty log over 3 iterations took 9.925916693s. (Avg 3.308638897s/iteration)

After optimization:
-------------------
Iteration 1 clear dirty log time: 2.318988110s
Iteration 2 clear dirty log time: 1.794470164s
Iteration 3 clear dirty log time: 1.791668628s
Clear dirty log over 3 iterations took 5.905126902s. (Avg 1.968375634s/iteration)

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/kvm/mmu/tdp_iter.h | 14 ++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.c  | 35 +++++++++++++++--------------------
 2 files changed, 29 insertions(+), 20 deletions(-)

Comments

David Matlack Feb. 15, 2023, 9:12 p.m. UTC | #1
On Fri, Feb 10, 2023 at 05:46:21PM -0800, Vipin Sharma wrote:
> Do an atomic-AND to clear the dirty state of SPTEs. Optimize the
> clear-dirty-log flow by skipping __handle_changed_spte() and calling
> kvm_set_pfn_dirty() directly instead.
> 
> An atomic-AND fetches the latest SPTE value, clears only its dirty
> state, and installs the new value in a single operation. This
> optimization avoids the unnecessary checks that __handle_changed_spte()
> would otherwise perform.
> 
> With the removal of tdp_mmu_set_spte_no_dirty_log(), the
> "record_dirty_log" parameter of __tdp_mmu_set_spte() is now obsolete;
> every remaining caller sets it to true. This dead code will be cleaned
> up in future commits.
> 
> Tested on a VM (160 vCPUs, 160 GB memory) and found that the performance
> of the clear dirty log stage of dirty_log_perf_test improved by ~40%.
> 
> Before optimization:
> --------------------
> Iteration 1 clear dirty log time: 3.638543593s
> Iteration 2 clear dirty log time: 3.145032742s
> Iteration 3 clear dirty log time: 3.142340358s
> Clear dirty log over 3 iterations took 9.925916693s. (Avg 3.308638897s/iteration)
> 
> After optimization:
> -------------------
> Iteration 1 clear dirty log time: 2.318988110s
> Iteration 2 clear dirty log time: 1.794470164s
> Iteration 3 clear dirty log time: 1.791668628s
> Clear dirty log over 3 iterations took 5.905126902s. (Avg 1.968375634s/iteration)
> 
> Signed-off-by: Vipin Sharma <vipinsh@google.com>

Reviewed-by: David Matlack <dmatlack@google.com>
Sean Christopherson March 17, 2023, 10:59 p.m. UTC | #2
On Fri, Feb 10, 2023, Vipin Sharma wrote:
> @@ -1677,8 +1670,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
>  static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
>  				  gfn_t gfn, unsigned long mask, bool wrprot)
>  {
> +	/*
> +	 * Either all SPTEs in TDP MMU will need write protection or none. This
> +	 * contract will not be modified for TDP MMU pages.
> +	 */
> +	u64 clear_bit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
> +							shadow_dirty_mask;

Switching from spte_ad_need_write_protect() to kvm_ad_enabled() belongs in a
separate patch.  In the unlikely event that the above assertion/contract is
invalid, any issues should bisect to the switch, not to a much more complex
patch.
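
E.g. the standalone switch would be something like this (untested),
swapping only the condition and leaving the rest of the flow as is:

	if (wrprot || !kvm_ad_enabled()) {
		if (is_writable_pte(iter.old_spte))
			new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
		else
			continue;
	} else {
		if (iter.old_spte & shadow_dirty_mask)
			new_spte = iter.old_spte & ~shadow_dirty_mask;
		else
			continue;
	}

	tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);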

I'll make that happen when applying.
Vipin Sharma March 17, 2023, 11:50 p.m. UTC | #3
On Fri, Mar 17, 2023 at 3:59 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Fri, Feb 10, 2023, Vipin Sharma wrote:
> > @@ -1677,8 +1670,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
> >  static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
> >                                 gfn_t gfn, unsigned long mask, bool wrprot)
> >  {
> > +     /*
> > +      * Either all SPTEs in TDP MMU will need write protection or none. This
> > +      * contract will not be modified for TDP MMU pages.
> > +      */
> > +     u64 clear_bit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
> > +                                                     shadow_dirty_mask;
>
> Switching from spte_ad_need_write_protect() to kvm_ad_enabled() belongs in a
> separate patch.  In the unlikely event that the above assertion/contract is
> invalid, any issues should bisect to the switch, not to a much more complex
> patch.
>
> I'll make that happen when applying.

Makes sense, thanks!

Patch

diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index c11c5d00b2c1..fae559559a80 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -58,6 +58,20 @@  static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
 	return old_spte;
 }
 
+static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
+					  u64 mask, int level)
+{
+	atomic64_t *sptep_atomic;
+
+	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
+		sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
+		return (u64)atomic64_fetch_and(~mask, sptep_atomic);
+	}
+
+	__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
+	return old_spte;
+}
+
 /*
  * A TDP iterator performs a pre-order walk over a TDP paging structure.
  */
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index bba33aea0fb0..66ccbeb9d845 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -771,13 +771,6 @@  static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
 	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
 }
 
-static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
-						 struct tdp_iter *iter,
-						 u64 new_spte)
-{
-	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
-}
-
 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
 	for_each_tdp_pte(_iter, _root, _start, _end)
 
@@ -1677,8 +1670,13 @@  bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 				  gfn_t gfn, unsigned long mask, bool wrprot)
 {
+	/*
+	 * Either all SPTEs in TDP MMU will need write protection or none. This
+	 * contract will not be modified for TDP MMU pages.
+	 */
+	u64 clear_bit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
+							shadow_dirty_mask;
 	struct tdp_iter iter;
-	u64 new_spte;
 
 	rcu_read_lock();
 
@@ -1693,19 +1691,16 @@  static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		mask &= ~(1UL << (iter.gfn - gfn));
 
-		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
-			if (is_writable_pte(iter.old_spte))
-				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
-			else
-				continue;
-		} else {
-			if (iter.old_spte & shadow_dirty_mask)
-				new_spte = iter.old_spte & ~shadow_dirty_mask;
-			else
-				continue;
-		}
+		if (!(iter.old_spte & clear_bit))
+			continue;
 
-		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
+		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
+							iter.old_spte,
+							clear_bit, iter.level);
+		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
+					       iter.old_spte,
+					       iter.old_spte & ~clear_bit);
+		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
 	}
 
 	rcu_read_unlock();