
[1/2] KVM: x86: Rename pre_{enter,leave}_smm() ops to reference SMM state save

Message ID 20190327192946.19128-2-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series [1/2] KVM: x86: Rename pre_{enter,leave}_smm() ops to reference SMM state save

Commit Message

Sean Christopherson March 27, 2019, 7:29 p.m. UTC
Rename pre_{enter,leave}_smm() to post_smi_save_state() and
pre_rsm_load_state() to make it explicitly clear when the callbacks are
invoked, e.g. to allow a future patch to add pre_smi_save_state() and
post_rsm_load_state().

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_emulate.h |  2 +-
 arch/x86/include/asm/kvm_host.h    |  4 ++--
 arch/x86/kvm/emulate.c             |  8 ++++----
 arch/x86/kvm/svm.c                 |  8 ++++----
 arch/x86/kvm/vmx/vmx.c             |  8 ++++----
 arch/x86/kvm/x86.c                 | 14 +++++++-------
 6 files changed, 22 insertions(+), 22 deletions(-)
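
[Editor's note: for orientation, a minimal standalone sketch (stand-in
types and helpers, not kernel code) of the ordering the new names
encode, mirroring enter_smm() in x86.c and em_rsm() in emulate.c in the
patch below: the SMI hook runs after the state-save area is filled, and
the RSM hook runs before state is reloaded from it.]

#include <stdio.h>

struct vcpu { int in_guest_mode; };	/* stand-in for struct kvm_vcpu */

/* Runs AFTER vCPU state is saved into the SMM state-save area. */
static int post_smi_save_state(struct vcpu *v, char *smstate)
{
	v->in_guest_mode = 0;	/* e.g. leave guest mode (vendor specific) */
	return 0;
}

/* Runs BEFORE vCPU state is reloaded from the SMM state-save area. */
static int pre_rsm_load_state(struct vcpu *v, unsigned long long smbase)
{
	v->in_guest_mode = 1;	/* e.g. re-enter guest mode */
	return 0;
}

int main(void)
{
	struct vcpu v = { .in_guest_mode = 1 };
	char smstate[512] = { 0 };

	/* SMI entry: state is saved first, then the hook is invoked. */
	post_smi_save_state(&v, smstate);

	/* RSM: the hook is invoked first, then state is loaded from SMRAM. */
	if (pre_rsm_load_state(&v, 0x30000ULL))
		return 1;

	printf("in_guest_mode=%d\n", v.in_guest_mode);
	return 0;
}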

Comments

Jim Mattson March 27, 2019, 7:59 p.m. UTC | #1
On Wed, Mar 27, 2019 at 12:31 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> Rename pre_{enter,leave}_smm() to post_smi_save_state() and
> pre_rsm_load_state() to make it explicitly clear when the callbacks are
> invoked, e.g. to allow a future patch to add pre_smi_save_state() and
> post_rsm_load_state().
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>

Patch

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 93c4bf598fb0..b2a65d08d1f8 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -226,7 +226,7 @@  struct x86_emulate_ops {
 
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+	int (*pre_rsm_load_state)(struct x86_emulate_ctxt *ctxt, u64 smbase);
 
 };
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 790876082a77..29ce45f41ee5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1181,8 +1181,8 @@  struct kvm_x86_ops {
 	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
 
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
-	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*post_smi_save_state)(struct kvm_vcpu *vcpu, char *smstate);
+	int (*pre_rsm_load_state)(struct kvm_vcpu *vcpu, u64 smbase);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c338984c850d..ca60eb3358d9 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2608,11 +2608,11 @@  static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	smbase = ctxt->ops->get_smbase(ctxt);
 
 	/*
-	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
-	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
-	 * state-save area.
+	 * Give pre_rsm_load_state() a chance to make ISA-specific changes to
+	 * the vCPU state (e.g. enter guest mode) before loading state from the
+	 * SMM state-save area.
 	 */
-	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+	if (ctxt->ops->pre_rsm_load_state(ctxt, smbase))
 		return X86EMUL_UNHANDLEABLE;
 
 	if (emulator_has_longmode(ctxt))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 426039285fd1..c0e511c3b298 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6193,7 +6193,7 @@  static int svm_smi_allowed(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_post_smi_save_state(struct kvm_vcpu *vcpu, char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
@@ -6215,7 +6215,7 @@  static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_rsm_load_state(struct kvm_vcpu *vcpu, u64 smbase)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *nested_vmcb;
@@ -7251,8 +7251,8 @@  static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.setup_mce = svm_setup_mce,
 
 	.smi_allowed = svm_smi_allowed,
-	.pre_enter_smm = svm_pre_enter_smm,
-	.pre_leave_smm = svm_pre_leave_smm,
+	.post_smi_save_state = svm_post_smi_save_state,
+	.pre_rsm_load_state = svm_pre_rsm_load_state,
 	.enable_smi_window = enable_smi_window,
 
 	.mem_enc_op = svm_mem_enc_op,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 02cf8a551bd1..fc96101e39ac 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7354,7 +7354,7 @@  static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_post_smi_save_state(struct kvm_vcpu *vcpu, char *smstate)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -7368,7 +7368,7 @@  static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_rsm_load_state(struct kvm_vcpu *vcpu, u64 smbase)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
@@ -7693,8 +7693,8 @@  static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.setup_mce = vmx_setup_mce,
 
 	.smi_allowed = vmx_smi_allowed,
-	.pre_enter_smm = vmx_pre_enter_smm,
-	.pre_leave_smm = vmx_pre_leave_smm,
+	.post_smi_save_state = vmx_post_smi_save_state,
+	.pre_rsm_load_state = vmx_pre_rsm_load_state,
 	.enable_smi_window = enable_smi_window,
 
 	.check_nested_events = NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a419656521b6..abfac99fb7c5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5961,9 +5961,9 @@  static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_rsm_load_state(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
-	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+	return kvm_x86_ops->pre_rsm_load_state(emul_to_vcpu(ctxt), smbase);
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -6005,7 +6005,7 @@  static const struct x86_emulate_ops emulate_ops = {
 	.set_nmi_mask        = emulator_set_nmi_mask,
 	.get_hflags          = emulator_get_hflags,
 	.set_hflags          = emulator_set_hflags,
-	.pre_leave_smm       = emulator_pre_leave_smm,
+	.pre_rsm_load_state  = emulator_pre_rsm_load_state,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -7523,11 +7523,11 @@  static void enter_smm(struct kvm_vcpu *vcpu)
 		enter_smm_save_state_32(vcpu, buf);
 
 	/*
-	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
-	 * vCPU state (e.g. leave guest mode) after we've saved the state into
-	 * the SMM state-save area.
+	 * Give post_smi_save_state() a chance to make ISA-specific changes to
+	 * the vCPU state (e.g. leave guest mode) after we've saved the state
+	 * into the SMM state-save area.
 	 */
-	kvm_x86_ops->pre_enter_smm(vcpu, buf);
+	kvm_x86_ops->post_smi_save_state(vcpu, buf);
 
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
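
[Editor's note: the commit message anticipates a follow-up adding
pre_smi_save_state() and post_rsm_load_state(). A hypothetical sketch
of the symmetric hook set the new naming scheme leaves room for; the
signatures of the two future hooks are guesses modeled on the existing
pair and are not part of this patch.]

struct smm_hooks_sketch {
	int (*pre_smi_save_state)(struct kvm_vcpu *vcpu, char *smstate);	/* hypothetical */
	int (*post_smi_save_state)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_rsm_load_state)(struct kvm_vcpu *vcpu, u64 smbase);
	int (*post_rsm_load_state)(struct kvm_vcpu *vcpu, u64 smbase);	/* hypothetical */
};

Keying each name off the triggering instruction (SMI or RSM) plus the
phase relative to the state-save area (save or load) is what makes the
pre/post prefixes unambiguous, which the old pre_{enter,leave}_smm()
names were not.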