--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -298,6 +298,7 @@ struct x86_emulate_ctxt {
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
+ bool left_smm; /* post_leave_smm() needs to be called after emulation */
bool have_exception;
struct x86_exception exception;
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1058,6 +1058,9 @@ struct kvm_x86_ops {
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+ int (*prep_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+ int (*post_leave_smm)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2601,6 +2601,8 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+ ctxt->left_smm = true;
+
return X86EMUL_CONTINUE;
}
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5357,6 +5357,18 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
vcpu->arch.mcg_cap &= 0x1ff;
}
+static int svm_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
+static int svm_post_leave_smm(struct kvm_vcpu *vcpu)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -5467,6 +5479,9 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.deliver_posted_interrupt = svm_deliver_avic_intr,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
+
+ .prep_enter_smm = svm_prep_enter_smm,
+ .post_leave_smm = svm_post_leave_smm,
};
static int __init svm_init(void)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11629,6 +11629,18 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
~FEATURE_CONTROL_LMCE;
}
+static int vmx_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
+static int vmx_post_leave_smm(struct kvm_vcpu *vcpu)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -11754,6 +11766,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
#endif
.setup_mce = vmx_setup_mce,
+
+ .prep_enter_smm = vmx_prep_enter_smm,
+ .post_leave_smm = vmx_post_leave_smm,
};
static int __init vmx_init(void)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5674,6 +5674,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
+ ctxt->left_smm = false;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
@@ -5755,6 +5756,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
+ if (r == EMULATE_DONE && ctxt->left_smm)
+ kvm_x86_ops->post_leave_smm(vcpu);
if (r == EMULATE_DONE &&
(ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
kvm_vcpu_do_singlestep(vcpu, &r);
@@ -6614,6 +6617,9 @@ static void enter_smm(struct kvm_vcpu *vcpu)
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
vcpu->arch.hflags |= HF_SMM_MASK;
memset(buf, 0, 512);
+
+ kvm_x86_ops->prep_enter_smm(vcpu, buf);
+
if (guest_cpuid_has_longmode(vcpu))
enter_smm_save_state_64(vcpu, buf);
else
Entering and exiting SMM may require ISA-specific handling under certain
circumstances. This commit adds two new callbacks with empty implementations.
Actual functionality will be added in following commits.

* prep_enter_smm() is to be called when entering SMM, before any SMM-related
  vcpu state has been changed

* post_leave_smm() is to be called when emulating the RSM instruction, after
  all SMM-related vcpu state has been restored

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
---
 arch/x86/include/asm/kvm_emulate.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  3 +++
 arch/x86/kvm/emulate.c             |  2 ++
 arch/x86/kvm/svm.c                 | 15 +++++++++++++++
 arch/x86/kvm/vmx.c                 | 15 +++++++++++++++
 arch/x86/kvm/x86.c                 |  6 ++++++
 6 files changed, 42 insertions(+)
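For illustration only, a vendor backend might eventually wire ISA-specific
state into the two hooks roughly as sketched below. This is not taken from
the follow-up commits: the example_* names and the single static bookkeeping
struct are hypothetical placeholders (real code would keep this per vcpu);
only is_guest_mode() and pr_debug() are existing kernel helpers.

#include <linux/kvm_host.h>	/* struct kvm_vcpu, is_guest_mode() */

/* Hypothetical bookkeeping; a real backend would store this per vcpu. */
struct example_smm_state {
	bool was_in_guest_mode;
};
static struct example_smm_state example_smm;

static int example_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	/*
	 * Runs from enter_smm() after HF_SMM_MASK is set but before
	 * enter_smm_save_state_32/64() fills @smstate, so any state that
	 * is about to be clobbered can still be captured here.
	 */
	example_smm.was_in_guest_mode = is_guest_mode(vcpu);
	return 0;
}

static int example_post_leave_smm(struct kvm_vcpu *vcpu)
{
	/*
	 * Runs from x86_emulate_instruction() only when RSM emulation
	 * completed (ctxt->left_smm), i.e. after all SMM-related vcpu
	 * state has been restored.
	 */
	if (example_smm.was_in_guest_mode)
		pr_debug("kvm: would restore nested guest state here\n");
	return 0;
}

The ordering is the point of the contract described above: prep_enter_smm()
sees the vcpu before the SMM state-save area is populated, while
post_leave_smm() sees it only once RSM has fully rebuilt the pre-SMM state.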