[RFC,03/18] KVM: x86: Drop emulator_pre_leave_smm()

Message ID 20190328175557.14408-4-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: clear HF_SMM_MASK before loading state

Commit Message

Sean Christopherson March 28, 2019, 5:55 p.m. UTC
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
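
With leave_smm() living in x86.c, the RSM path no longer needs to bounce through x86_emulate_ops to reach SMBASE or the vendor hook: it can read vcpu->arch.smbase and call kvm_x86_ops->pre_leave_smm() directly, so the get_smbase(), set_smbase() and pre_leave_smm() emulator callbacks can be dropped. A condensed sketch of how the touched part of leave_smm() reads after this patch, using only names visible in the diff below (everything else is elided):

    /* Condensed sketch, not the full function; see the diff below. */
    static int leave_smm(struct kvm_vcpu *vcpu)
    {
            u64 smbase;

            /* ... EFER clearing and other setup elided ... */

            /* SMBASE is read straight from vCPU state, no emulator callback. */
            smbase = vcpu->arch.smbase;

            /*
             * The vendor implementation is reached directly via kvm_x86_ops
             * instead of through a pre_leave_smm() member of x86_emulate_ops.
             */
            if (kvm_x86_ops->pre_leave_smm(vcpu, smbase))
                    return X86EMUL_UNHANDLEABLE;

            /* ... rsm_load_state_64()/rsm_load_state_32() elided ... */
    }

rsm_load_state_32() and rsm_load_state_64() likewise write vcpu->arch.smbase directly rather than going through a set_smbase() callback.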
---
 arch/x86/include/asm/kvm_emulate.h |  3 ---
 arch/x86/kvm/x86.c                 | 32 ++++++------------------------
 2 files changed, 6 insertions(+), 29 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 4622ac0028d1..51e474944d59 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -207,8 +207,6 @@  struct x86_emulate_ops {
 	int (*cpl)(struct x86_emulate_ctxt *ctxt);
 	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
-	u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
-	void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
 	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
 	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
 	int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
@@ -226,7 +224,6 @@  struct x86_emulate_ops {
 
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
 	int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
 
 };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 45ea253f41f8..bef35c3a79bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5893,20 +5893,6 @@  static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
-static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
-{
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-	return vcpu->arch.smbase;
-}
-
-static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
-{
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-	vcpu->arch.smbase = smbase;
-}
-
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
 			      u32 pmc)
 {
@@ -5962,11 +5948,6 @@  static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
-{
-	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
-}
-
 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
 	return leave_smm(emul_to_vcpu(ctxt));
@@ -5997,8 +5978,6 @@  static const struct x86_emulate_ops emulate_ops = {
 	.cpl                 = emulator_get_cpl,
 	.get_dr              = emulator_get_dr,
 	.set_dr              = emulator_set_dr,
-	.get_smbase          = emulator_get_smbase,
-	.set_smbase          = emulator_set_smbase,
 	.set_msr             = emulator_set_msr,
 	.get_msr             = emulator_get_msr,
 	.check_pmc	     = emulator_check_pmc,
@@ -6011,7 +5990,6 @@  static const struct x86_emulate_ops emulate_ops = {
 	.set_nmi_mask        = emulator_set_nmi_mask,
 	.get_hflags          = emulator_get_hflags,
 	.set_hflags          = emulator_set_hflags,
-	.pre_leave_smm       = emulator_pre_leave_smm,
 	.leave_smm           = emulator_leave_smm,
 };
 
@@ -7706,6 +7684,7 @@  static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
 
 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	struct desc_struct desc;
 	struct desc_ptr dt;
 	u16 selector;
@@ -7753,13 +7732,14 @@  static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
 
-	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+	vcpu->arch.smbase = GET_SMSTATE(u32, smbase, 0x7ef8);
 
 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	struct desc_struct desc;
 	struct desc_ptr dt;
 	u64 val, cr0, cr3, cr4;
@@ -7781,7 +7761,7 @@  static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
 	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
-	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
+	vcpu->arch.smbase =         GET_SMSTATE(u32, smbase, 0x7f00);
 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
@@ -7862,14 +7842,14 @@  static int leave_smm(struct kvm_vcpu *vcpu)
 	efer = 0;
 	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
 
-	smbase = ctxt->ops->get_smbase(ctxt);
+	smbase = vcpu->arch.smbase;
 
 	/*
 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
 	 * state-save area.
 	 */
-	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+	if (kvm_x86_ops->pre_leave_smm(vcpu, smbase))
 		return X86EMUL_UNHANDLEABLE;
 
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))