[RFC,17/18] KVM: x86: Load SMRAM in a single shot when leaving SMM

Message ID 20190328175557.14408-18-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: clear HF_SMM_MASK before loading state

Commit Message

Sean Christopherson March 28, 2019, 5:55 p.m. UTC
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h |   5 +-
 arch/x86/kvm/svm.c              |  20 ++---
 arch/x86/kvm/vmx/vmx.c          |   2 +-
 arch/x86/kvm/x86.c              | 142 +++++++++++++++-----------------
 4 files changed, 81 insertions(+), 88 deletions(-)

Comments

Sean Christopherson March 29, 2019, 4:06 p.m. UTC | #1
On Thu, Mar 28, 2019 at 10:55:56AM -0700, Sean Christopherson wrote:
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h |   5 +-
>  arch/x86/kvm/svm.c              |  20 ++---
>  arch/x86/kvm/vmx/vmx.c          |   2 +-
>  arch/x86/kvm/x86.c              | 142 +++++++++++++++-----------------
>  4 files changed, 81 insertions(+), 88 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 790876082a77..773f403d7017 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
>  
>  	int (*smi_allowed)(struct kvm_vcpu *vcpu);
>  	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
> -	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
> +	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
>  	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
>  
>  	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
> @@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
>  #define put_smstate(type, buf, offset, val)                      \
>  	*(type *)((buf) + (offset) - 0x7e00) = val
>  
> +#define GET_SMSTATE(type, buf, offset)		\
> +	(*(type *)((buf) + (offset) - 0x7e00))
> +
>  #endif /* _ASM_X86_KVM_HOST_H */
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 426039285fd1..33975e2aa486 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -6215,27 +6215,23 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
>  	return 0;
>  }
>  
> -static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
> +static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	struct vmcb *nested_vmcb;
>  	struct page *page;
> -	struct {
> -		u64 guest;
> -		u64 vmcb;
> -	} svm_state_save;
> +	u64 guest;
> +	u64 vmcb;
>  	int ret;
>  
> -	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
> -				  sizeof(svm_state_save));
> -	if (ret)
> -		return ret;
> +	guest = GET_SMSTATE(u64, smsate, 0x7ed8);
> +	vmcb = GET_SMSTATE(u64, smsate, 0x7ee0);
                                ^^^^^^

Doh, this doesn't compile due to a typo ("smsate" instead of "smstate"); I didn't have CONFIG_KVM_AMD selected, so svm.c was never built...
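
For context on the offset convention: the SDM documents SMRAM save-state fields at offsets 0x7e00-0x7fff relative to smbase + 0x8000, i.e. the final 512 bytes of the SMRAM window, starting at smbase + 0xfe00. The patch snapshots exactly those 512 bytes, so GET_SMSTATE()/put_smstate() subtract 0x7e00 to turn a documented offset into a buffer index. Below is a minimal stand-alone sketch of that arithmetic; it is illustrative user-space C, not kernel code, and the demo main() and the field it prints are hypothetical:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Same shape as the kernel macros: rebase a documented SMRAM
	 * offset (0x7e00..0x7fff) onto a 512-byte snapshot of the
	 * state-save area. */
	#define GET_SMSTATE(type, buf, offset) \
		(*(type *)((buf) + (offset) - 0x7e00))
	#define put_smstate(type, buf, offset, val) \
		(*(type *)((buf) + (offset) - 0x7e00) = (val))

	int main(void)
	{
		/* Keep the snapshot 8-byte aligned so the u64 accesses are
		 * clean; the kernel additionally builds with
		 * -fno-strict-aliasing, which these type-punning casts
		 * rely on. */
		_Alignas(8) char buf[512];

		memset(buf, 0, sizeof(buf));

		/* 0x7ed8 is the offset the SVM code uses for its "guest"
		 * flag; it lands at buf[0x7ed8 - 0x7e00] = buf[0xd8]. */
		put_smstate(uint64_t, buf, 0x7ed8, 1);
		printf("guest = %llu\n",
		       (unsigned long long)GET_SMSTATE(uint64_t, buf, 0x7ed8));
		return 0;
	}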

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 790876082a77..773f403d7017 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
 
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
 	*(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset)		\
+	(*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 426039285fd1..33975e2aa486 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6215,27 +6215,23 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *nested_vmcb;
 	struct page *page;
-	struct {
-		u64 guest;
-		u64 vmcb;
-	} svm_state_save;
+	u64 guest;
+	u64 vmcb;
 	int ret;
 
-	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-				  sizeof(svm_state_save));
-	if (ret)
-		return ret;
+	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-	if (svm_state_save.guest) {
+	if (guest) {
 		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
+		nested_vmcb = nested_svm_map(svm, vmcb, &page);
 		if (nested_vmcb)
-			enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
+			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 		else
 			ret = 1;
 		vcpu->arch.hflags |= HF_SMM_MASK;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 02cf8a551bd1..4612a1e7585e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7368,7 +7368,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c48c4e4383d..1e510ca8a8e4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7547,14 +7547,6 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-#define GET_SMSTATE(type, smbase, offset)				  \
-	({								  \
-	 type __val;							  \
-	 if (kvm_vcpu_read_guest(vcpu, smbase + offset, &__val, sizeof(__val))) \
-		 return X86EMUL_UNHANDLEABLE;				  \
-	 __val;								  \
-	})
-
 static void rsm_set_seg(struct kvm_vcpu *vcpu, int seg, u16 sel, u64 base,
 			u32 limit, u32 flags)
 {
@@ -7585,12 +7577,12 @@ static void rsm_set_seg_64(struct kvm_vcpu *vcpu, int seg, u16 sel,
 	rsm_set_seg(vcpu, seg, sel, base_lo | (base_hi << 32), limit, flags);
 }
 
-static int rsm_load_seg_32(struct kvm_vcpu *vcpu, u64 smbase, int n)
+static int rsm_load_seg_32(struct kvm_vcpu *vcpu, const char *buf, int n)
 {
 	int offset;
 	u16 selector;
 
-	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+	selector = GET_SMSTATE(u32, buf, 0x7fa8 + n * 4);
 
 	if (n < 3)
 		offset = 0x7f84 + n * 12;
@@ -7598,25 +7590,25 @@ static int rsm_load_seg_32(struct kvm_vcpu *vcpu, u64 smbase, int n)
 		offset = 0x7f2c + (n - 3) * 12;
 
 	rsm_set_seg(vcpu, n, selector,
-		    GET_SMSTATE(u32, smbase, offset + 8),
-		    GET_SMSTATE(u32, smbase, offset + 4),
-		    GET_SMSTATE(u32, smbase, offset));
+		    GET_SMSTATE(u32, buf, offset + 8),
+		    GET_SMSTATE(u32, buf, offset + 4),
+		    GET_SMSTATE(u32, buf, offset));
 
 	return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_seg_64(struct kvm_vcpu *vcpu, u64 smbase, int n)
+static int rsm_load_seg_64(struct kvm_vcpu *vcpu, const char *buf, int n)
 {
 	int offset;
 
 	offset = 0x7e00 + n * 16;
 
 	rsm_set_seg_64(vcpu, n,
-		       GET_SMSTATE(u16, smbase, offset),
-		       GET_SMSTATE(u32, smbase, offset + 8),
-		       GET_SMSTATE(u32, smbase, offset + 12),
-		       GET_SMSTATE(u32, smbase, offset + 4),
-		       GET_SMSTATE(u16, smbase, offset + 2) << 8);
+		       GET_SMSTATE(u16, buf, offset),
+		       GET_SMSTATE(u32, buf, offset + 8),
+		       GET_SMSTATE(u32, buf, offset + 12),
+		       GET_SMSTATE(u32, buf, offset + 4),
+		       GET_SMSTATE(u16, buf, offset + 2) << 8);
 
 	return X86EMUL_CONTINUE;
 }
@@ -7673,60 +7665,60 @@ static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
 	return &ctxt->_regs[nr];
 }
 
-static int rsm_load_state_32(struct kvm_vcpu *vcpu, u64 smbase)
+static int rsm_load_state_32(struct kvm_vcpu *vcpu, const char *buf)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	struct desc_ptr dt;
 	u32 val, cr0, cr3, cr4;
 	int i;
 
-	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
-	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
-	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
+	cr0 =          GET_SMSTATE(u32, buf, 0x7ffc);
+	cr3 =          GET_SMSTATE(u32, buf, 0x7ff8);
+	ctxt->eflags = GET_SMSTATE(u32, buf, 0x7ff4) | X86_EFLAGS_FIXED;
+	ctxt->_eip =   GET_SMSTATE(u32, buf, 0x7ff0);
 
 	for (i = 0; i < 8; i++)
-		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+		*reg_write(ctxt, i) = GET_SMSTATE(u32, buf, 0x7fd0 + i * 4);
 
-	val = GET_SMSTATE(u32, smbase, 0x7fcc);
+	val = GET_SMSTATE(u32, buf, 0x7fcc);
 	__kvm_set_dr(vcpu, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-	val = GET_SMSTATE(u32, smbase, 0x7fc8);
+	val = GET_SMSTATE(u32, buf, 0x7fc8);
 	__kvm_set_dr(vcpu, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
 	rsm_set_seg(vcpu, VCPU_SREG_TR,
-		    GET_SMSTATE(u32, smbase, 0x7fc4),
-		    GET_SMSTATE(u32, smbase, 0x7f64),
-		    GET_SMSTATE(u32, smbase, 0x7f60),
-		    GET_SMSTATE(u32, smbase, 0x7f5c));
+		    GET_SMSTATE(u32, buf, 0x7fc4),
+		    GET_SMSTATE(u32, buf, 0x7f64),
+		    GET_SMSTATE(u32, buf, 0x7f60),
+		    GET_SMSTATE(u32, buf, 0x7f5c));
 
 	rsm_set_seg(vcpu, VCPU_SREG_LDTR,
-		    GET_SMSTATE(u32, smbase, 0x7fc0),
-		    GET_SMSTATE(u32, smbase, 0x7f80),
-		    GET_SMSTATE(u32, smbase, 0x7f7c),
-		    GET_SMSTATE(u32, smbase, 0x7f78));
+		    GET_SMSTATE(u32, buf, 0x7fc0),
+		    GET_SMSTATE(u32, buf, 0x7f80),
+		    GET_SMSTATE(u32, buf, 0x7f7c),
+		    GET_SMSTATE(u32, buf, 0x7f78));
 
-	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
-	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
+	dt.address = GET_SMSTATE(u32, buf, 0x7f74);
+	dt.size =    GET_SMSTATE(u32, buf, 0x7f70);
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
-	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
-	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
+	dt.address = GET_SMSTATE(u32, buf, 0x7f58);
+	dt.size =    GET_SMSTATE(u32, buf, 0x7f54);
 	kvm_x86_ops->set_idt(vcpu, &dt);
 
 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_32(vcpu, smbase, i);
+		int r = rsm_load_seg_32(vcpu, buf, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
 
-	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+	cr4 = GET_SMSTATE(u32, buf, 0x7f14);
 
-	vcpu->arch.smbase = GET_SMSTATE(u32, smbase, 0x7ef8);
+	vcpu->arch.smbase = GET_SMSTATE(u32, buf, 0x7ef8);
 
 	return rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
 }
 
-static int rsm_load_state_64(struct kvm_vcpu *vcpu, u64 smbase)
+static int rsm_load_state_64(struct kvm_vcpu *vcpu, const char *buf)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	struct desc_ptr dt;
@@ -7734,43 +7726,43 @@ static int rsm_load_state_64(struct kvm_vcpu *vcpu, u64 smbase)
 	int i, r;
 
 	for (i = 0; i < 16; i++)
-		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+		*reg_write(ctxt, i) = GET_SMSTATE(u64, buf, 0x7ff8 - i * 8);
 
-	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
-	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+	ctxt->_eip   = GET_SMSTATE(u64, buf, 0x7f78);
+	ctxt->eflags = GET_SMSTATE(u32, buf, 0x7f70) | X86_EFLAGS_FIXED;
 
-	val = GET_SMSTATE(u32, smbase, 0x7f68);
+	val = GET_SMSTATE(u32, buf, 0x7f68);
 	__kvm_set_dr(vcpu, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-	val = GET_SMSTATE(u32, smbase, 0x7f60);
+	val = GET_SMSTATE(u32, buf, 0x7f60);
 	__kvm_set_dr(vcpu, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
-	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
-	vcpu->arch.smbase =         GET_SMSTATE(u32, smbase, 0x7f00);
-	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
+	cr0 =               GET_SMSTATE(u64, buf, 0x7f58);
+	cr3 =               GET_SMSTATE(u64, buf, 0x7f50);
+	cr4 =               GET_SMSTATE(u64, buf, 0x7f48);
+	vcpu->arch.smbase = GET_SMSTATE(u32, buf, 0x7f00);
+	val =               GET_SMSTATE(u64, buf, 0x7ed0);
 	emulator_set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
 	rsm_set_seg_64(vcpu, VCPU_SREG_TR,
-		       GET_SMSTATE(u32, smbase, 0x7e90),
-		       GET_SMSTATE(u32, smbase, 0x7e98),
-		       GET_SMSTATE(u32, smbase, 0x7e9c),
-		       GET_SMSTATE(u32, smbase, 0x7e94),
-		       GET_SMSTATE(u32, smbase, 0x7e92) << 8);
+		       GET_SMSTATE(u32, buf, 0x7e90),
+		       GET_SMSTATE(u32, buf, 0x7e98),
+		       GET_SMSTATE(u32, buf, 0x7e9c),
+		       GET_SMSTATE(u32, buf, 0x7e94),
+		       GET_SMSTATE(u32, buf, 0x7e92) << 8);
 
-	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
-	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
+	dt.size =      GET_SMSTATE(u32, buf, 0x7e84);
+	dt.address =   GET_SMSTATE(u64, buf, 0x7e88);
 	kvm_x86_ops->set_idt(vcpu, &dt);
 
 	rsm_set_seg_64(vcpu, VCPU_SREG_LDTR,
-		       GET_SMSTATE(u32, smbase, 0x7e70),
-		       GET_SMSTATE(u32, smbase, 0x7e78),
-		       GET_SMSTATE(u32, smbase, 0x7e7c),
-		       GET_SMSTATE(u32, smbase, 0x7e74),
-		       GET_SMSTATE(u32, smbase, 0x7e72) << 8);
+		       GET_SMSTATE(u32, buf, 0x7e70),
+		       GET_SMSTATE(u32, buf, 0x7e78),
+		       GET_SMSTATE(u32, buf, 0x7e7c),
+		       GET_SMSTATE(u32, buf, 0x7e74),
+		       GET_SMSTATE(u32, buf, 0x7e72) << 8);
 
-	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
-	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
+	dt.size =      GET_SMSTATE(u32, buf, 0x7e64);
+	dt.address =   GET_SMSTATE(u64, buf, 0x7e68);
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	r = rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
@@ -7778,7 +7770,7 @@ static int rsm_load_state_64(struct kvm_vcpu *vcpu, u64 smbase)
 		return r;
 
 	for (i = 0; i < 6; i++) {
-		r = rsm_load_seg_64(vcpu, smbase, i);
+		r = rsm_load_seg_64(vcpu, buf, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
@@ -7789,9 +7781,13 @@ static int rsm_load_state_64(struct kvm_vcpu *vcpu, u64 smbase)
 static int leave_smm(struct kvm_vcpu *vcpu)
 {
 	unsigned long cr0, cr4;
-	u64 smbase;
+	char buf[512];
 	int ret;
 
+	if (kvm_vcpu_read_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf,
+				sizeof(buf)))
+		return X86EMUL_UNHANDLEABLE;
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
@@ -7827,20 +7823,18 @@ static int leave_smm(struct kvm_vcpu *vcpu)
 	/* And finally go back to 32-bit mode.  */
 	emulator_set_msr(&vcpu->arch.emulate_ctxt, MSR_EFER, 0);
 
-	smbase = vcpu->arch.smbase;
-
 	/*
 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
 	 * state-save area.
 	 */
-	if (kvm_x86_ops->pre_leave_smm(vcpu, smbase))
+	if (kvm_x86_ops->pre_leave_smm(vcpu, buf))
 		return X86EMUL_UNHANDLEABLE;
 
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-		ret = rsm_load_state_64(vcpu, smbase + 0x8000);
+		ret = rsm_load_state_64(vcpu, buf);
 	else
-		ret = rsm_load_state_32(vcpu, smbase + 0x8000);
+		ret = rsm_load_state_32(vcpu, buf);
 
 	if (ret != X86EMUL_CONTINUE) {
 		/* FIXME: should triple fault */
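
The structural win here is error handling: the deleted x86.c macro hid a "return X86EMUL_UNHANDLEABLE" inside a statement expression, so any one of the dozens of guest reads in rsm_load_state_32/64() could bail out mid-restore and leave the vCPU only partially loaded. After this patch the sole fallible step is the single kvm_vcpu_read_guest() up front; every subsequent GET_SMSTATE() is plain pointer arithmetic into the local buffer. A condensed sketch of the control-flow difference (illustrative, not the literal patched functions; leave_smm_sketch is a made-up name):

	/* Before: each field read could fault partway through the restore,
	 * returning out of the middle of the calling function. */
	#define GET_SMSTATE_OLD(type, smbase, offset)			\
		({							\
		 type __val;						\
		 if (kvm_vcpu_read_guest(vcpu, smbase + offset, &__val, \
					 sizeof(__val)))		\
			 return X86EMUL_UNHANDLEABLE;			\
		 __val;							\
		})

	/* After: one fallible read, then infallible loads from the
	 * snapshot. */
	static int leave_smm_sketch(struct kvm_vcpu *vcpu)
	{
		char buf[512];

		if (kvm_vcpu_read_guest(vcpu, vcpu->arch.smbase + 0xfe00,
					buf, sizeof(buf)))
			return X86EMUL_UNHANDLEABLE;	/* nothing loaded yet */

		/* ... all GET_SMSTATE(type, buf, offset) reads below here
		 * are simple buffer loads and cannot fail ... */
		return X86EMUL_CONTINUE;
	}

This also motivates the kvm_x86_ops change: pre_leave_smm() now receives the snapshot itself rather than smbase, so vendor code (see the svm.c hunk) can pull fields such as the nested-guest flag out of the buffer without issuing further guest-memory reads.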