
[RFC,15/18] KVM: x86: Invert passing of vcpu and ctxt when leaving SMM

Message ID 20190328175557.14408-16-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: clear HF_SMM_MASK before loading state

Commit Message

Sean Christopherson March 28, 2019, 5:55 p.m. UTC
Pass the vCPU to rsm_load_state_32() and rsm_load_state_64() and derive
the emulation context in the callees, instead of passing the context and
deriving the vCPU via emul_to_vcpu().  All callers already have the vCPU
in hand, so inverting the parameters lets leave_smm() drop its local
ctxt variable.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/x86.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
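
The shape of the change is easy to see outside the kernel tree. Below is a
standalone sketch of the pattern (struct vcpu, struct emu_ctxt, and
ctxt_to_vcpu() are stand-ins invented for illustration, not kernel APIs):
the "before" callee receives the embedded emulation context and recovers its
container with a container_of()-style lookup, while the "after" callee
receives the container and derives the context with a plain member access,
mirroring what this patch does to rsm_load_state_32() and rsm_load_state_64().

#include <stddef.h>
#include <stdio.h>

struct emu_ctxt { int mode; };

struct vcpu {
	int id;
	struct emu_ctxt ctxt;	/* embedded, like vcpu->arch.emulate_ctxt */
};

/* Poor man's container_of(), standing in for emul_to_vcpu(). */
#define ctxt_to_vcpu(c) \
	((struct vcpu *)((char *)(c) - offsetof(struct vcpu, ctxt)))

/* Before: callee takes the context and derives the vcpu. */
static void load_state_before(struct emu_ctxt *ctxt)
{
	struct vcpu *vcpu = ctxt_to_vcpu(ctxt);

	printf("before: vcpu %d, mode %d\n", vcpu->id, ctxt->mode);
}

/* After: callee takes the vcpu and derives the context. */
static void load_state_after(struct vcpu *vcpu)
{
	struct emu_ctxt *ctxt = &vcpu->ctxt;

	printf("after: vcpu %d, mode %d\n", vcpu->id, ctxt->mode);
}

int main(void)
{
	struct vcpu v = { .id = 0, .ctxt = { .mode = 32 } };

	load_state_before(&v.ctxt);
	load_state_after(&v);
	return 0;
}

The direction matters because the callers in leave_smm() already hold the
vCPU, so the inverted signatures replace pointer arithmetic on every entry
with simply taking a member's address.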

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 55687273d428..282c85e41be8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7682,9 +7682,9 @@  static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
 	return &ctxt->_regs[nr];
 }
 
-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct kvm_vcpu *vcpu, u64 smbase)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	struct desc_ptr dt;
 	u32 val, cr0, cr3, cr4;
 	int i;
@@ -7735,9 +7735,9 @@  static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	return rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
 }
 
-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_64(struct kvm_vcpu *vcpu, u64 smbase)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	struct desc_ptr dt;
 	u64 val, cr0, cr3, cr4;
 	int i, r;
@@ -7797,7 +7797,6 @@  static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 static int leave_smm(struct kvm_vcpu *vcpu)
 {
-	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	unsigned long cr0, cr4;
 	u64 smbase;
 	int ret;
@@ -7835,7 +7834,7 @@  static int leave_smm(struct kvm_vcpu *vcpu)
 		kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
 
 	/* And finally go back to 32-bit mode.  */
-	emulator_set_msr(ctxt, MSR_EFER, 0);
+	emulator_set_msr(&vcpu->arch.emulate_ctxt, MSR_EFER, 0);
 
 	smbase = vcpu->arch.smbase;
 
@@ -7848,9 +7847,9 @@  static int leave_smm(struct kvm_vcpu *vcpu)
 		return X86EMUL_UNHANDLEABLE;
 
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+		ret = rsm_load_state_64(vcpu, smbase + 0x8000);
 	else
-		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+		ret = rsm_load_state_32(vcpu, smbase + 0x8000);
 
 	if (ret != X86EMUL_CONTINUE) {
 		/* FIXME: should triple fault */
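
For reference, the emul_to_vcpu() lookup that the removed lines relied on is
a container_of() wrapper in KVM's x86 code; the definition below is quoted
from memory of the contemporaneous tree, so treat it as an approximation
rather than a verbatim citation:

/* Approximate 2019-era definition, from memory: */
#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

Because emulate_ctxt is embedded directly in the vCPU's arch state, both
directions of the conversion are constant-offset pointer arithmetic; the
point of the inversion is that the callees now take the type their callers
naturally have.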