diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -226,7 +226,7 @@ struct x86_emulate_ops {
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
- int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+ int (*pre_rsm_load_state)(struct x86_emulate_ctxt *ctxt, u64 smbase);
};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1181,8 +1181,8 @@ struct kvm_x86_ops {
void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu);
- int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
- int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+ int (*post_smi_save_state)(struct kvm_vcpu *vcpu, char *smstate);
+ int (*pre_rsm_load_state)(struct kvm_vcpu *vcpu, u64 smbase);
int (*enable_smi_window)(struct kvm_vcpu *vcpu);
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2608,11 +2608,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
smbase = ctxt->ops->get_smbase(ctxt);
/*
- * Give pre_leave_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. enter guest mode) before loading state from the SMM
- * state-save area.
+ * Give pre_rsm_load_state() a chance to make ISA-specific changes to
+ * the vCPU state (e.g. enter guest mode) before loading state from the
+ * SMM state-save area.
*/
- if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+ if (ctxt->ops->pre_rsm_load_state(ctxt, smbase))
return X86EMUL_UNHANDLEABLE;
if (emulator_has_longmode(ctxt))
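For reference, the ordering this hunk preserves in em_rsm() condenses to
the sketch below (declarations and error handling are trimmed for
illustration; rsm_load_state_32/64() are the existing emulate.c helpers):

        u64 smbase = ctxt->ops->get_smbase(ctxt);
        int ret;

        /* ISA-specific fixups (e.g. entering guest mode) run first ... */
        if (ctxt->ops->pre_rsm_load_state(ctxt, smbase))
                return X86EMUL_UNHANDLEABLE;

        /* ... only then is architectural state loaded from SMRAM. */
        if (emulator_has_longmode(ctxt))
                ret = rsm_load_state_64(ctxt, smbase + 0x8000);
        else
                ret = rsm_load_state_32(ctxt, smbase + 0x8000);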
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6193,7 +6193,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu)
return 1;
}
-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_post_smi_save_state(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
@@ -6215,7 +6215,7 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_rsm_load_state(struct kvm_vcpu *vcpu, u64 smbase)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb;
@@ -7251,8 +7251,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.setup_mce = svm_setup_mce,
.smi_allowed = svm_smi_allowed,
- .pre_enter_smm = svm_pre_enter_smm,
- .pre_leave_smm = svm_pre_leave_smm,
+ .post_smi_save_state = svm_post_smi_save_state,
+ .pre_rsm_load_state = svm_pre_rsm_load_state,
.enable_smi_window = enable_smi_window,
.mem_enc_op = svm_mem_enc_op,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7354,7 +7354,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
return 1;
}
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_post_smi_save_state(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7368,7 +7368,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_rsm_load_state(struct kvm_vcpu *vcpu, u64 smbase)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -7693,8 +7693,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.setup_mce = vmx_setup_mce,
.smi_allowed = vmx_smi_allowed,
- .pre_enter_smm = vmx_pre_enter_smm,
- .pre_leave_smm = vmx_pre_leave_smm,
+ .post_smi_save_state = vmx_post_smi_save_state,
+ .pre_rsm_load_state = vmx_pre_rsm_load_state,
.enable_smi_window = enable_smi_window,
.check_nested_events = NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5961,9 +5961,9 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
}
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_rsm_load_state(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
- return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+ return kvm_x86_ops->pre_rsm_load_state(emul_to_vcpu(ctxt), smbase);
}
static const struct x86_emulate_ops emulate_ops = {
@@ -6005,7 +6005,7 @@ static const struct x86_emulate_ops emulate_ops = {
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
.set_hflags = emulator_set_hflags,
- .pre_leave_smm = emulator_pre_leave_smm,
+ .pre_rsm_load_state = emulator_pre_rsm_load_state,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -7523,11 +7523,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
enter_smm_save_state_32(vcpu, buf);
/*
- * Give pre_enter_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. leave guest mode) after we've saved the state into
- * the SMM state-save area.
+ * Give post_smi_save_state() a chance to make ISA-specific changes to
+ * the vCPU state (e.g. leave guest mode) after we've saved the state
+ * into the SMM state-save area.
*/
- kvm_x86_ops->pre_enter_smm(vcpu, buf);
+ kvm_x86_ops->post_smi_save_state(vcpu, buf);
vcpu->arch.hflags |= HF_SMM_MASK;
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
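The SMI entry side mirrors the RSM ordering; a condensed sketch of
enter_smm() after the rename (SMRAM buffer setup and the 32/64-bit
save-state selection are simplified):

        char buf[512];

        /* Architectural state is saved into the SMRAM image first ... */
        enter_smm_save_state_32(vcpu, buf);

        /* ... then the vendor hook runs, e.g. to leave guest mode. */
        kvm_x86_ops->post_smi_save_state(vcpu, buf);

        vcpu->arch.hflags |= HF_SMM_MASK;
        kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));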
Rename pre_{enter,leave}_smm() to post_smi_save_state() and
pre_rsm_load_state() to make it explicitly clear when the callbacks are
invoked, e.g. to allow a future patch to add pre_smi_save_state() and
post_rsm_load_state().

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_emulate.h |  2 +-
 arch/x86/include/asm/kvm_host.h    |  4 ++--
 arch/x86/kvm/emulate.c             |  8 ++++----
 arch/x86/kvm/svm.c                 |  8 ++++----
 arch/x86/kvm/vmx/vmx.c             |  8 ++++----
 arch/x86/kvm/x86.c                 | 14 +++++++-------
 6 files changed, 22 insertions(+), 22 deletions(-)
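Purely as an illustrative sketch (not part of this patch), the new naming
leaves room for the anticipated bracketing hooks on both sides of each
state transfer, mirroring the signatures introduced above:

        /* Hypothetical future kvm_x86_ops members, for illustration only. */
        int (*pre_smi_save_state)(struct kvm_vcpu *vcpu, char *smstate);
        int (*post_smi_save_state)(struct kvm_vcpu *vcpu, char *smstate);
        int (*pre_rsm_load_state)(struct kvm_vcpu *vcpu, u64 smbase);
        int (*post_rsm_load_state)(struct kvm_vcpu *vcpu, u64 smbase);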