
[v2,3/3] KVM: nSVM: call KVM_REQ_GET_NESTED_STATE_PAGES on exit from SMM mode

Message ID 20210823114618.1184209-4-mlevitsk@redhat.com (mailing list archive)
State New, archived
Series: KVM: few more SMM fixes

Commit Message

Maxim Levitsky Aug. 23, 2021, 11:46 a.m. UTC
This allows nested SVM code to be more similar to nested VMX code.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 9 ++++++---
 arch/x86/kvm/svm/svm.c    | 8 +++++++-
 arch/x86/kvm/svm/svm.h    | 3 ++-
 3 files changed, 15 insertions(+), 5 deletions(-)

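For context: KVM_REQ_GET_NESTED_STATE_PAGES lets KVM defer loading the nested
state pages until the next vCPU entry instead of touching guest memory in the
middle of a state transition; nested VMX already uses it when RSM returns to
guest mode. The request is consumed in the common x86 run loop; roughly (a
sketch of the vcpu_enter_guest() of that era, not the exact upstream code):

if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
	/* Load vmcb12/vmcs12 backing pages; bail out to userspace on failure */
	if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
		r = 0;
		goto out;
	}
}
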
Comments

Sean Christopherson Sept. 9, 2021, 12:59 a.m. UTC | #1
On Mon, Aug 23, 2021, Maxim Levitsky wrote:
> This allows nested SVM code to be more similar to nested VMX code.
> 
> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
> ---
>  arch/x86/kvm/svm/nested.c | 9 ++++++---
>  arch/x86/kvm/svm/svm.c    | 8 +++++++-
>  arch/x86/kvm/svm/svm.h    | 3 ++-
>  3 files changed, 15 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 5e13357da21e..678fd21f6077 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -572,7 +572,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
>  }
>  
>  int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
> -			 struct vmcb *vmcb12)
> +			 struct vmcb *vmcb12, bool from_entry)

from_vmrun would be a better name.  VMX uses the slightly abstract from_vmentry
because of the VMLAUNCH vs. VMRESUME silliness.  If we want to explicitly follow
VMX then from_vmentry would be more appropriate, but I don't see any reason not
to be more precise.
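
With the suggested rename, the prototype and the two callers would look
roughly like this (a sketch assuming the rename is adopted as-is):

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun);

/* nested_svm_vmrun(): a real VMRUN, load nested state synchronously */
enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true);

/* svm_leave_smm(): RSM back into guest mode, defer page loading via
 * KVM_REQ_GET_NESTED_STATE_PAGES */
enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);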

>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	int ret;
> @@ -602,13 +602,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
>  	nested_vmcb02_prepare_save(svm, vmcb12);
>  
>  	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
> -				  nested_npt_enabled(svm), true);
> +				  nested_npt_enabled(svm), from_entry);
>  	if (ret)
>  		return ret;
>  
>  	if (!npt_enabled)
>  		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
>  
> +	if (!from_entry)
> +		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
> +
>  	svm_set_gif(svm, true);
>  
>  	return 0;
> @@ -674,7 +677,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
>  
>  	svm->nested.nested_run_pending = 1;
>  
> -	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
> +	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
>  		goto out_exit_err;
>  
>  	if (nested_svm_vmrun_msrpm(svm))
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index ea7a4dacd42f..76ee15af8c48 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4354,6 +4354,12 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
>  			if (svm_allocate_nested(svm))
>  				return 1;
>  
> +			/* Exit from the SMM to the non root mode also uses
> +			 * the KVM_REQ_GET_NESTED_STATE_PAGES request,
> +			 * but in this case the pdptrs must be always reloaded
> +			 */
> +			vcpu->arch.pdptrs_from_userspace = false;

Hmm, I think this belongs in the previous patch.  And I would probably go so far
as to say it belongs in emulator_leave_smm(), i.e. pdptrs_from_userspace should
be cleared on RSM regardless of what mode is being resumed.

> +
>  			/*
>  			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
>  			 * used during SMM (see svm_enter_smm())
> @@ -4368,7 +4374,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
>  
>  			vmcb12 = map.hva;
>  			nested_load_control_from_vmcb12(svm, &vmcb12->control);
> -			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
> +			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
>  
>  			kvm_vcpu_unmap(vcpu, &map, true);
>  			kvm_vcpu_unmap(vcpu, &map_save, true);
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 524d943f3efc..51ffa46ab257 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
>  	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
>  }
>  
> -int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
> +int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
> +		u64 vmcb_gpa, struct vmcb *vmcb12, bool from_entry);

Alignment is funky, it can/should match the definition, e.g.

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_entry);

>  void svm_leave_nested(struct vcpu_svm *svm);
>  void svm_free_nested(struct vcpu_svm *svm);
>  int svm_allocate_nested(struct vcpu_svm *svm);
> -- 
> 2.26.3
>
Maxim Levitsky Sept. 12, 2021, 10:35 a.m. UTC | #2
On Thu, 2021-09-09 at 00:59 +0000, Sean Christopherson wrote:
> On Mon, Aug 23, 2021, Maxim Levitsky wrote:
> > This allows nested SVM code to be more similar to nested VMX code.
> > 
> > Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
> > ---
> >  arch/x86/kvm/svm/nested.c | 9 ++++++---
> >  arch/x86/kvm/svm/svm.c    | 8 +++++++-
> >  arch/x86/kvm/svm/svm.h    | 3 ++-
> >  3 files changed, 15 insertions(+), 5 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> > index 5e13357da21e..678fd21f6077 100644
> > --- a/arch/x86/kvm/svm/nested.c
> > +++ b/arch/x86/kvm/svm/nested.c
> > @@ -572,7 +572,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
> >  }
> >  
> >  int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
> > -			 struct vmcb *vmcb12)
> > +			 struct vmcb *vmcb12, bool from_entry)
> 
> from_vmrun would be a better name.  VMX uses the slightly abstract from_vmentry
> because of the VMLAUNCH vs. VMRESUME silliness.  If we want to explicitly follow
> VMX then from_vmentry would be more appropriate, but I don't see any reason not
> to be more precise.
OK.

> 
> >  {
> >  	struct vcpu_svm *svm = to_svm(vcpu);
> >  	int ret;
> > @@ -602,13 +602,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
> >  	nested_vmcb02_prepare_save(svm, vmcb12);
> >  
> >  	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
> > -				  nested_npt_enabled(svm), true);
> > +				  nested_npt_enabled(svm), from_entry);
> >  	if (ret)
> >  		return ret;
> >  
> >  	if (!npt_enabled)
> >  		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
> >  
> > +	if (!from_entry)
> > +		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
> > +
> >  	svm_set_gif(svm, true);
> >  
> >  	return 0;
> > @@ -674,7 +677,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
> >  
> >  	svm->nested.nested_run_pending = 1;
> >  
> > -	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
> > +	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
> >  		goto out_exit_err;
> >  
> >  	if (nested_svm_vmrun_msrpm(svm))
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index ea7a4dacd42f..76ee15af8c48 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -4354,6 +4354,12 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
> >  			if (svm_allocate_nested(svm))
> >  				return 1;
> >  
> > +			/* Exit from the SMM to the non root mode also uses
> > +			 * the KVM_REQ_GET_NESTED_STATE_PAGES request,
> > +			 * but in this case the pdptrs must be always reloaded
> > +			 */
> > +			vcpu->arch.pdptrs_from_userspace = false;
> 
> Hmm, I think this belongs in the previous patch.  And I would probably go so far
> as to say it belongs in emulator_leave_smm(), i.e. pdptrs_from_userspace should
> be cleared on RSM regardless of what mode is being resumed.

I actually don't think that this belongs in the previous patch: the issue didn't exist
on SVM before this change, because SVM didn't use KVM_REQ_GET_NESTED_STATE_PAGES at all.

However, I do agree with you that it makes sense to move this hack to the common x86 code.
I have put it in kvm_smm_changed() and will soon send a new version.
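
Roughly like this, modulo the final form of kvm_smm_changed() (a sketch
assuming it takes an entering_smm flag; names follow this discussion, not a
final commit):

static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
	...
	if (!entering_smm) {
		/*
		 * Even if KVM_SET_SREGS2 loaded the PDPTRs from userspace,
		 * after RSM they must be re-read from guest memory, so
		 * clear the flag on every exit from SMM.
		 */
		vcpu->arch.pdptrs_from_userspace = false;
	}
	...
}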

> 
> > +
> >  			/*
> >  			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
> >  			 * used during SMM (see svm_enter_smm())
> > @@ -4368,7 +4374,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
> >  
> >  			vmcb12 = map.hva;
> >  			nested_load_control_from_vmcb12(svm, &vmcb12->control);
> > -			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
> > +			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
> >  
> >  			kvm_vcpu_unmap(vcpu, &map, true);
> >  			kvm_vcpu_unmap(vcpu, &map_save, true);
> > diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> > index 524d943f3efc..51ffa46ab257 100644
> > --- a/arch/x86/kvm/svm/svm.h
> > +++ b/arch/x86/kvm/svm/svm.h
> > @@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
> >  	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
> >  }
> >  
> > -int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
> > +int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
> > +		u64 vmcb_gpa, struct vmcb *vmcb12, bool from_entry);
> 
> Alignment is funky, it can/should match the definition, e.g.
Oops, I forgot to check the prototype - these are the things you write once and then forget about,
as long as they compile :-)

> 
> int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
> 			 struct vmcb *vmcb12, bool from_entry);
> 
> >  void svm_leave_nested(struct vcpu_svm *svm);
> >  void svm_free_nested(struct vcpu_svm *svm);
> >  int svm_allocate_nested(struct vcpu_svm *svm);
> > -- 
> > 2.26.3
> > 

Thanks for the review!

Best regards,
	Maxim Levitsky

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5e13357da21e..678fd21f6077 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -572,7 +572,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
-			 struct vmcb *vmcb12)
+			 struct vmcb *vmcb12, bool from_entry)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
@@ -602,13 +602,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
 	nested_vmcb02_prepare_save(svm, vmcb12);
 
 	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
-				  nested_npt_enabled(svm), true);
+				  nested_npt_enabled(svm), from_entry);
 	if (ret)
 		return ret;
 
 	if (!npt_enabled)
 		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 
+	if (!from_entry)
+		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
 	svm_set_gif(svm, true);
 
 	return 0;
@@ -674,7 +677,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
 	svm->nested.nested_run_pending = 1;
 
-	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
 		goto out_exit_err;
 
 	if (nested_svm_vmrun_msrpm(svm))
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ea7a4dacd42f..76ee15af8c48 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4354,6 +4354,12 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 			if (svm_allocate_nested(svm))
 				return 1;
 
+			/* Exit from the SMM to the non root mode also uses
+			 * the KVM_REQ_GET_NESTED_STATE_PAGES request,
+			 * but in this case the pdptrs must be always reloaded
+			 */
+			vcpu->arch.pdptrs_from_userspace = false;
+
 			/*
 			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
 			 * used during SMM (see svm_enter_smm())
@@ -4368,7 +4374,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 
 			vmcb12 = map.hva;
 			nested_load_control_from_vmcb12(svm, &vmcb12->control);
-			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
+			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
 			kvm_vcpu_unmap(vcpu, &map, true);
 			kvm_vcpu_unmap(vcpu, &map_save, true);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 524d943f3efc..51ffa46ab257 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+		u64 vmcb_gpa, struct vmcb *vmcb12, bool from_entry);
 void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);