[v3,4/6] KVM: nVMX: treat CR4.VMXE as reserved in SMM

Message ID 20170925080904.24850-5-lprosek@redhat.com (mailing list archive)
State New, archived

Commit Message

Ladi Prosek Sept. 25, 2017, 8:09 a.m. UTC
Intel SDM 34.14.3 Protection of CR4.VMXE in SMM:

"Under the default treatment, CR4.VMXE is treated as a reserved bit while a
logical processor is in SMM. Any attempt by software running in SMM to set
this bit causes a general-protection exception."
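
In KVM terms this means that a guest attempt to set CR4.VMXE while the vCPU
is in SMM has to be rejected with #GP. A minimal sketch of the shape of that
check (essentially what the vmx.c hunk below adds; is_smm() tests
HF_SMM_MASK in vcpu->arch.hflags, and callers of the set_cr4 path inject
#GP on a non-zero return):

	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
		return 1;	/* caller injects #GP into the guest */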

em_rsm() may set CR4.VMXE as part of loading the saved SMM state, so the
operations must be ordered carefully: HF_SMM_MASK has to stay set while the
state-save area is accessed, but has to be cleared before CR4.VMXE is set.
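
The resulting ordering in rsm_load_state_64() looks roughly like this
(simplified from the hunk below, error handling trimmed):

	/* 1. Still in SMM: enter protected mode with VMXE masked off. */
	rsm_enter_protected_mode(ctxt, cr0, cr4 & ~X86_CR4_VMXE);

	/* 2. Leave SMM; CR4.VMXE stops being treated as reserved. */
	ctxt->ops->set_hflags(ctxt,
			      ctxt->ops->get_hflags(ctxt) & ~X86EMUL_SMM_MASK);

	/* 3. Now the full saved CR4 value, VMXE included, can be loaded. */
	ctxt->ops->set_cr(ctxt, 4, cr4);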

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
---
 arch/x86/kvm/emulate.c | 18 +++++++++++++++---
 arch/x86/kvm/vmx.c     |  4 ++++
 2 files changed, 19 insertions(+), 3 deletions(-)

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 15f527b44aa7..f12bf51d379a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2479,6 +2479,9 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~X86EMUL_SMM_MASK);
+
 	return rsm_enter_protected_mode(ctxt, cr0, cr4);
 }
 
@@ -2531,7 +2534,8 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	/* We're still in SMM so CR4.VMXE is reserved. */
+	r = rsm_enter_protected_mode(ctxt, cr0, cr4 & ~X86_CR4_VMXE);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
@@ -2541,6 +2545,13 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 			return r;
 	}
 
+	/* Out of SMM now and finish off CR4. */
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~X86EMUL_SMM_MASK);
+
+	if (ctxt->ops->set_cr(ctxt, 4, cr4))
+		return X86EMUL_UNHANDLEABLE;
+
 	return X86EMUL_CONTINUE;
 }
 
@@ -2601,9 +2612,10 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 
 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
 		ctxt->ops->set_nmi_mask(ctxt, false);
+	else
+		ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+			~X86EMUL_SMM_INSIDE_NMI_MASK);
 
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
 	return X86EMUL_CONTINUE;
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index be4b5f9a84bf..7e350b8d724d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4366,6 +4366,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		 */
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
+
+		/* cr4.VMXE is a reserved bit in SMM */
+		if (is_smm(vcpu))
+			return 1;
 	}
 
 	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
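
For reference, the guest-visible contract being enforced: under the default
treatment, code running in SMM that tries to set CR4.VMXE must take #GP(0).
A hypothetical guest-side illustration (not part of the patch, and only
meaningful when executed in SMM at CPL 0):

	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	cr4 |= X86_CR4_VMXE;
	/* Faults with #GP(0) while the logical processor is in SMM. */
	asm volatile("mov %0, %%cr4" : : "r" (cr4));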