[43/43] KVM: x86: Drop pointless @reset_roots from kvm_init_mmu()

Message ID 20210424004645.3950558-44-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86: vCPU RESET/INIT fixes and consolidation

Commit Message

Sean Christopherson April 24, 2021, 12:46 a.m. UTC
Remove the @reset_roots param from kvm_init_mmu(); the only user,
kvm_mmu_reset_context(), has already unloaded the MMU and thus freed and
invalidated all roots.  This also happens to be why the reset_roots=true
path doesn't leak roots; they're already invalid.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu.h        |  2 +-
 arch/x86/kvm/mmu/mmu.c    | 13 ++-----------
 arch/x86/kvm/svm/nested.c |  2 +-
 arch/x86/kvm/vmx/nested.c |  2 +-
 4 files changed, 5 insertions(+), 14 deletions(-)
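
For context, the claim above hinges on kvm_mmu_reset_context() calling
kvm_mmu_unload() before kvm_init_mmu().  As a reference point, here is a
sketch of kvm_mmu_unload() as it looked in kernels of this era (not part of
the patch); it frees and invalidates every root, root_hpa and prev_roots
alike, for both the root and guest MMUs:

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	/* Free and invalidate all roots, cached previous roots included. */
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}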

Comments

Sean Christopherson May 27, 2021, 7:11 p.m. UTC | #1
On Fri, Apr 23, 2021, Sean Christopherson wrote:
> Remove the @reset_roots param from kvm_init_mmu(); the only user,
> kvm_mmu_reset_context(), has already unloaded the MMU and thus freed and
> invalidated all roots.  This also happens to be why the reset_roots=true
> path doesn't leak roots; they're already invalid.
> 
> No functional change intended.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/mmu.h        |  2 +-
>  arch/x86/kvm/mmu/mmu.c    | 13 ++-----------
>  arch/x86/kvm/svm/nested.c |  2 +-
>  arch/x86/kvm/vmx/nested.c |  2 +-
>  4 files changed, 5 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 88d0ed5225a4..63b49725fb24 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -65,7 +65,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
>  void
>  reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
>  
> -void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
> +void kvm_init_mmu(struct kvm_vcpu *vcpu);
>  void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
>  			     gpa_t nested_cr3);
>  void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 930ac8a7e7c9..ff3e200b32dd 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4793,17 +4793,8 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
>  	update_last_nonleaf_level(vcpu, g_context);
>  }
>  
> -void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
> +void kvm_init_mmu(struct kvm_vcpu *vcpu)
>  {
> -	if (reset_roots) {
> -		uint i;
> -
> -		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
> -
> -		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
> -			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

Egad!  This is wrong.  mmu->root_hpa is guaranteed to be INVALID_PAGE, but the
prev_roots are not!  I'll drop this patch and do cleanup of this code in a
separate series.

> -	}
> -
>  	if (mmu_is_nested(vcpu))
>  		init_kvm_nested_mmu(vcpu);
>  	else if (tdp_enabled)

Sean Christopherson May 27, 2021, 7:25 p.m. UTC | #2
On Thu, May 27, 2021, Sean Christopherson wrote:
> On Fri, Apr 23, 2021, Sean Christopherson wrote:
> > Remove the @reset_roots param from kvm_init_mmu(); the only user,
> > kvm_mmu_reset_context(), has already unloaded the MMU and thus freed and
> > invalidated all roots.  This also happens to be why the reset_roots=true
> > path doesn't leak roots; they're already invalid.
> > 
> > No functional change intended.
> > 
> > Signed-off-by: Sean Christopherson <seanjc@google.com>
> > ---
> >  arch/x86/kvm/mmu.h        |  2 +-
> >  arch/x86/kvm/mmu/mmu.c    | 13 ++-----------
> >  arch/x86/kvm/svm/nested.c |  2 +-
> >  arch/x86/kvm/vmx/nested.c |  2 +-
> >  4 files changed, 5 insertions(+), 14 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> > index 88d0ed5225a4..63b49725fb24 100644
> > --- a/arch/x86/kvm/mmu.h
> > +++ b/arch/x86/kvm/mmu.h
> > @@ -65,7 +65,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
> >  void
> >  reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
> >  
> > -void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
> > +void kvm_init_mmu(struct kvm_vcpu *vcpu);
> >  void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
> >  			     gpa_t nested_cr3);
> >  void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 930ac8a7e7c9..ff3e200b32dd 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -4793,17 +4793,8 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
> >  	update_last_nonleaf_level(vcpu, g_context);
> >  }
> >  
> > -void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
> > +void kvm_init_mmu(struct kvm_vcpu *vcpu)
> >  {
> > -	if (reset_roots) {
> > -		uint i;
> > -
> > -		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
> > -
> > -		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
> > -			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
> 
> Egad!  This is wrong.  mmu->root_hpa is guaranteed to be INVALID_PAGE, but the
> prev_roots are not!  I'll drop this patch and do cleanup of this code in a
> separate series.

*sigh*  Jumped the gun; I was right the first time.  kvm_mmu_free_roots() does
invalidate prev_roots[*] via mmu_free_root_page().  I still think I'll drop this
patch from this series; nothing else in this series is needed to purge
@reset_roots.

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,  <-- tricky little devil
					   &invalid_list);
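
For reference, a condensed sketch of mmu_free_root_page() from this era (body
abridged): the final store through @root_hpa is the tricky bit noted above,
and it is what marks the cached previous root invalid when the caller passes
&mmu->prev_roots[i].hpa.

static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
	/* ... drop the root's reference, zapping the page if necessary ... */

	*root_hpa = INVALID_PAGE;	/* <-- invalidates prev_roots[i].hpa */
}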


> 
> > -	}
> > -
> >  	if (mmu_is_nested(vcpu))
> >  		init_kvm_nested_mmu(vcpu);
> >  	else if (tdp_enabled)

Patch

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 88d0ed5225a4..63b49725fb24 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -65,7 +65,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
+void kvm_init_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
 			     gpa_t nested_cr3);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 930ac8a7e7c9..ff3e200b32dd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4793,17 +4793,8 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	update_last_nonleaf_level(vcpu, g_context);
 }
 
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
+void kvm_init_mmu(struct kvm_vcpu *vcpu)
 {
-	if (reset_roots) {
-		uint i;
-
-		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
-
-		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
-	}
-
 	if (mmu_is_nested(vcpu))
 		init_kvm_nested_mmu(vcpu);
 	else if (tdp_enabled)
@@ -4829,7 +4820,7 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_unload(vcpu);
-	kvm_init_mmu(vcpu, true);
+	kvm_init_mmu(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 540d43ba2cf4..a0b48a8f32ed 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -406,7 +406,7 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
 
-	kvm_init_mmu(vcpu, false);
+	kvm_init_mmu(vcpu);
 
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9dcdf158a405..3a5b86932a5e 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1137,7 +1137,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
 
-	kvm_init_mmu(vcpu, false);
+	kvm_init_mmu(vcpu);
 
 	return 0;
 }