
[3/3] KVM: x86: clear SMM flags before loading state while leaving SMM

Message ID 20190402150311.29481-4-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: clear HF_SMM_MASK before loading state

Commit Message

Sean Christopherson April 2, 2019, 3:03 p.m. UTC
RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1.  Stop dancing around the issue of HF_SMM_MASK being set when
loading SMSTATE into architectural state, e.g. by toggling it for
problematic flows, and simply clear HF_SMM_MASK prior to loading
architectural state (from SMRAM save state area).
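
The ordering constraint comes from the check referenced by the Fixes tag (5bea5123cbf0), which rejects setting CR4.VMXE while the vCPU is still flagged as being in SMM, so restoring CR4 from SMRAM fails if HF_SMM_MASK has not been cleared yet. The standalone sketch below only models that interaction; HF_SMM_MASK and X86_CR4_VMXE mirror the kernel names, but the struct and helpers are simplified stand-ins, not KVM code.

#include <stdbool.h>
#include <stdio.h>

#define HF_SMM_MASK   (1u << 6)
#define X86_CR4_VMXE  (1ul << 13)

struct vcpu {
	unsigned int hflags;
	unsigned long cr4;
};

/* Stand-in for the CR4 write path: VMXE is refused while in SMM. */
static int set_cr4(struct vcpu *v, unsigned long cr4)
{
	if ((cr4 & X86_CR4_VMXE) && (v->hflags & HF_SMM_MASK))
		return 1;	/* rejected */
	v->cr4 = cr4;
	return 0;
}

/* Stand-in for RSM emulation: restore CR4 from the SMRAM save state area. */
static int rsm(struct vcpu *v, unsigned long smram_cr4, bool clear_smm_first)
{
	if (clear_smm_first)
		v->hflags &= ~HF_SMM_MASK;	/* new ordering: leave SMM first */

	if (set_cr4(v, smram_cr4))
		return 1;			/* old ordering trips the check */

	v->hflags &= ~HF_SMM_MASK;		/* old ordering: leave SMM last */
	return 0;
}

int main(void)
{
	struct vcpu v = { .hflags = HF_SMM_MASK, .cr4 = 0 };

	/* The interrupted guest had CR4.VMXE=1, so that is what SMRAM holds. */
	printf("old ordering: %s\n",
	       rsm(&v, X86_CR4_VMXE, false) ? "RSM fails" : "RSM succeeds");

	v = (struct vcpu){ .hflags = HF_SMM_MASK, .cr4 = 0 };
	printf("new ordering: %s\n",
	       rsm(&v, X86_CR4_VMXE, true) ? "RSM fails" : "RSM succeeds");
	return 0;
}

With the old ordering, set_cr4() is reached while HF_SMM_MASK is still set and RSM fails; clearing the flag before loading the saved state lets the SMRAM value through, which is what the patch does.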

Reported-by: Jon Doron <arilou@gmail.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Fixes: 5bea5123cbf0 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/emulate.c | 12 ++++++------
 arch/x86/kvm/svm.c     | 12 ++++--------
 arch/x86/kvm/vmx/vmx.c |  2 --
 3 files changed, 10 insertions(+), 16 deletions(-)

Comments

Vitaly Kuznetsov April 3, 2019, 11:23 a.m. UTC | #1
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> RSM emulation is currently broken on VMX when the interrupted guest has
> CR4.VMXE=1.  Stop dancing around the issue of HF_SMM_MASK being set when
> loading SMSTATE into architectural state, e.g. by toggling it for
> problematic flows, and simply clear HF_SMM_MASK prior to loading
> architectural state (from SMRAM save state area).
>
> Reported-by: Jon Doron <arilou@gmail.com>
> Cc: Jim Mattson <jmattson@google.com>
> Cc: Liran Alon <liran.alon@oracle.com>
> Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
> Fixes: 5bea5123cbf0 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

I checked Hyper-V 2019 + secureboot on both VMX and SVM and they seem to
boot fine. So,

Tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>

for the series. Thanks!

Vitaly Kuznetsov April 3, 2019, 11:25 a.m. UTC | #2
Vitaly Kuznetsov <vkuznets@redhat.com> writes:

> I checked Hyper-V 2019 + secureboot on both VMX and SVM and they seem to
> boot fine. So,

Doh, it actually was Hyper-V 2016, not 2019.

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d4787fcf28c1..f92f8f1ebc80 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2571,6 +2571,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
@@ -2624,12 +2630,6 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
-
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
-
 	ctxt->ops->smm_changed(ctxt);
 
 	return X86EMUL_CONTINUE;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d428ea9da9be..c9c5836b82eb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6222,21 +6222,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 778db914c414..0b391771d570 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7380,9 +7380,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;