
[1/2] Revert "KVM: x86/mmu: Don't step down in the TDP iterator when zapping all SPTEs"

Message ID: 20211124214421.458549-2-mizhang@google.com
State: New, archived
Series: optimize spte zapping in zap_gfn_range()

Commit Message

Mingwei Zhang Nov. 24, 2021, 9:44 p.m. UTC
Not stepping down in the TDP iterator in the `zap_all` case avoids
re-reading the non-leaf SPTEs, thus accelerating the zapping process. But
when the number of SPTEs is too large, we may run out of CPU time and
cause RCU stall warnings in __handle_changed_pte() in the context of
zap_gfn_range().

Revert this patch to allow eliminating the RCU stall warning by using
two-phase zapping for the `zap_all` case.

This reverts commit 0103098fb4f13b447b26ed514bcd3140f6791047.

Cc: Sean Christopherson <seanjc@google.com>
Cc: Ben Gardon <bgardon@google.com>
Cc: David Matlack <dmatlack@google.com>

Signed-off-by: Mingwei Zhang <mizhang@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)
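
To make the trade-off concrete, here is a minimal user-space sketch of the
two-phase idea that the follow-up patch relies on (this is NOT KVM code; the
table layout, entry counts, and the maybe_yield() hook are illustrative
stand-ins): phase 1 steps down and zaps only leaf entries, keeping the work
between yield points bounded, and phase 2 then zaps the top-level entries
themselves.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOP_ENTRIES  4      /* top-level (root) entries   */
#define LEAF_ENTRIES 512    /* leaves per top-level entry */

struct top_entry {
	uint64_t *leaves;   /* NULL once zapped */
};

/* Stand-in for a reschedule point such as tdp_mmu_iter_cond_resched(). */
static void maybe_yield(void) { /* no-op in this sketch */ }

static void zap_all_two_phase(struct top_entry *root)
{
	int i, j;

	/* Phase 1: step down and zap only the leaf entries. */
	for (i = 0; i < TOP_ENTRIES; i++) {
		if (!root[i].leaves)
			continue;
		for (j = 0; j < LEAF_ENTRIES; j++) {
			root[i].leaves[j] = 0;
			maybe_yield();  /* bounded work between yields */
		}
	}

	/* Phase 2: zap the top-level entries, freeing each subtree. */
	for (i = 0; i < TOP_ENTRIES; i++) {
		free(root[i].leaves);
		root[i].leaves = NULL;
	}
}

int main(void)
{
	struct top_entry root[TOP_ENTRIES];
	int i;

	for (i = 0; i < TOP_ENTRIES; i++)
		root[i].leaves = calloc(LEAF_ENTRIES, sizeof(uint64_t));

	zap_all_two_phase(root);
	printf("zapped %d top-level entries\n", TOP_ENTRIES);
	return 0;
}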

Comments

David Matlack Nov. 30, 2021, 12:50 a.m. UTC | #1
On Wed, Nov 24, 2021 at 09:44:20PM +0000, Mingwei Zhang wrote:
> Not stepping down in the TDP iterator in the `zap_all` case avoids
> re-reading the non-leaf SPTEs, thus accelerating the zapping process. But
> when the number of SPTEs is too large, we may run out of CPU time and
> cause RCU stall warnings in __handle_changed_pte() in the context of
> zap_gfn_range().
> 
> Revert this patch to allow eliminating the RCU stall warning by using
> two-phase zapping for the `zap_all` case.
> 
> This reverts commit 0103098fb4f13b447b26ed514bcd3140f6791047.
> 
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: Ben Gardon <bgardon@google.com>
> Cc: David Matlack <dmatlack@google.com>
> 
> Signed-off-by: Mingwei Zhang <mizhang@google.com>

Reviewed-by: David Matlack <dmatlack@google.com>

> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 9 +--------
>  1 file changed, 1 insertion(+), 8 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 7c5dd83e52de..89d16bb104de 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -706,12 +706,6 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>  	bool zap_all = (start == 0 && end >= max_gfn_host);
>  	struct tdp_iter iter;
>  
> -	/*
> -	 * No need to try to step down in the iterator when zapping all SPTEs,
> -	 * zapping the top-level non-leaf SPTEs will recurse on their children.
> -	 */
> -	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
> -
>  	/*
>  	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
>  	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
> @@ -723,8 +717,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>  
>  	rcu_read_lock();
>  
> -	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
> -				   min_level, start, end) {
> +	tdp_root_for_each_pte(iter, root, start, end) {
>  retry:
>  		if (can_yield &&
>  		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
> -- 
> 2.34.0.rc2.393.gf8c9666880-goog
>

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7c5dd83e52de..89d16bb104de 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -706,12 +706,6 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool zap_all = (start == 0 && end >= max_gfn_host);
 	struct tdp_iter iter;
 
-	/*
-	 * No need to try to step down in the iterator when zapping all SPTEs,
-	 * zapping the top-level non-leaf SPTEs will recurse on their children.
-	 */
-	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
-
 	/*
 	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
 	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
@@ -723,8 +717,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
-				   min_level, start, end) {
+	tdp_root_for_each_pte(iter, root, start, end) {
 retry:
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
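
For reference, the two loop constructs in the hunk above differ only in the
minimum level bound passed to the iterator. The definitions below are an
approximation of the macros as they looked around this series (worth
double-checking against the exact tree):

/* arch/x86/kvm/mmu/tdp_iter.h (approximate) */
#define for_each_tdp_pte_min_level(iter, root, root_level, min_level, start, end) \
	for (tdp_iter_start(&iter, root, root_level, min_level, start);	\
	     iter.valid && iter.gfn < end;					\
	     tdp_iter_next(&iter))

#define for_each_tdp_pte(iter, root, root_level, start, end) \
	for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end)

/* arch/x86/kvm/mmu/tdp_mmu.c (approximate) */
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

In other words, tdp_root_for_each_pte() is simply the min_level ==
PG_LEVEL_4K case, so the revert removes the zap_all special case that raised
min_level to root->role.level and restores the default step-down walk.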