
[1/5] KVM: x86: introduce ISA specific SMM entry/exit callbacks

Message ID 20170913140628.7787-2-lprosek@redhat.com (mailing list archive)
State New, archived

Commit Message

Ladi Prosek Sept. 13, 2017, 2:06 p.m. UTC
Entering and exiting SMM may require ISA specific handling under certain
circumstances. This commit adds two new callbacks with empty implementations.
Actual functionality will be added in the following commits.

* prep_enter_smm() is to be called when entering SMM, before any
  SMM-related vcpu state has been changed
* post_leave_smm() is to be called when emulating the RSM instruction,
  after all SMM-related vcpu state has been restored

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
---
 arch/x86/include/asm/kvm_emulate.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  3 +++
 arch/x86/kvm/emulate.c             |  2 ++
 arch/x86/kvm/svm.c                 | 15 +++++++++++++++
 arch/x86/kvm/vmx.c                 | 15 +++++++++++++++
 arch/x86/kvm/x86.c                 |  6 ++++++
 6 files changed, 42 insertions(+)

Comments

Paolo Bonzini Sept. 13, 2017, 9:44 p.m. UTC | #1
On 13/09/2017 16:06, Ladi Prosek wrote:
> +	bool left_smm;  /* post_leave_smm() needs to be called after emulation */

This is already stored (more or less) in hflags.  Would it work to
invoke the hook from kvm_smm_changed instead?

Thanks,

Paolo
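
For context, kvm_smm_changed() is the x86.c helper that runs whenever the
HF_SMM_MASK bit of hflags flips; the emulator reaches it through
kvm_set_hflags(). A rough sketch of their shape in this era of the tree —
the exact bodies may differ:

static void kvm_smm_changed(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
		/* Leaving SMM: trace the exit and process a latched
		 * INIT or SMI, if any. */
		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	kvm_mmu_reset_context(vcpu);
}

static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
{
	unsigned changed = vcpu->arch.hflags ^ emul_flags;

	vcpu->arch.hflags = emul_flags;
	if (changed & HF_SMM_MASK)
		kvm_smm_changed(vcpu);
}

Note that kvm_set_hflags() is invoked from em_rsm() while the emulator is
still running, i.e. before the writeback phase of x86_emulate_instruction()
— which is the ordering problem Ladi describes next.
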
Ladi Prosek Sept. 14, 2017, 7:14 a.m. UTC | #2
On Wed, Sep 13, 2017 at 11:44 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 13/09/2017 16:06, Ladi Prosek wrote:
>> +     bool left_smm;  /* post_leave_smm() needs to be called after emulation */
>
> This is already stored (more or less) in hflags.  Would it work to
> invoke the hook from kvm_smm_changed instead?

I would have to reorder some of the calls under "if (writeback)" in
x86_emulate_instruction to make it work. The hook needs to be called
after all vcpu state has been synced. For example, kvm_rip_write
currently runs after kvm_set_hflags, so it would overwrite the RIP
restored by the hook.

The left_smm field is indeed not necessary, though. What if I keep
the hook call as a separate statement in x86_emulate_instruction to
make the ordering requirement explicit, but use hflags to detect that
we've left SMM?

Thanks!
Ladi
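
A minimal sketch of the alternative Ladi is proposing: snapshot hflags
before the emulator runs, and trigger the hook off the 1->0 transition of
HF_SMM_MASK instead of a dedicated ctxt->left_smm flag. Illustration only —
the emul_flags name and the exact condition are assumptions, not the final
patch:

	/* Tail of the "if (writeback)" block in x86_emulate_instruction().
	 * emul_flags is assumed to hold vcpu->arch.hflags as sampled before
	 * x86_emulate_insn() ran, i.e. before em_rsm() could clear
	 * HF_SMM_MASK via kvm_set_hflags(). */
	toggle_interruptibility(vcpu, ctxt->interruptibility);
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	kvm_rip_write(vcpu, ctxt->eip);
	/* All vcpu state is now synced; run the vendor hook last. */
	if (r == EMULATE_DONE &&
	    (emul_flags & HF_SMM_MASK) &&
	    !(vcpu->arch.hflags & HF_SMM_MASK))
		kvm_x86_ops->post_leave_smm(vcpu);

This keeps the call as a separate, clearly ordered statement while dropping
the extra emulator-context field.
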
Paolo Bonzini Sept. 14, 2017, 9:47 a.m. UTC | #3
On 14/09/2017 09:14, Ladi Prosek wrote:
> I would have to reorder some of the calls under "if (writeback)" in
> x86_emulate_instruction to make it work. The hook needs to be called
> after all vcpu state has been synced. For example, kvm_rip_write
> currently runs after kvm_set_hflags, so it would overwrite the RIP
> restored by the hook.
> 
> The left_smm field is indeed not necessary, though. What if I keep
> the hook call as a separate statement in x86_emulate_instruction to
> make the ordering requirement explicit, but use hflags to detect that
> we've left SMM?

That'd be even better, yes.

Paolo

Patch

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index fde36f1..8d56703 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -298,6 +298,7 @@ struct x86_emulate_ctxt {
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */
 	bool tf;	/* TF value before instruction (after for syscall/sysret) */
+	bool left_smm;  /* post_leave_smm() needs to be called after emulation */
 
 	bool have_exception;
 	struct x86_exception exception;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 92c9032..26acdb3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1058,6 +1058,9 @@ struct kvm_x86_ops {
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+	int (*prep_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+	int (*post_leave_smm)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index fb00559..5faaf85 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2601,6 +2601,8 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 
 	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
 		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+	ctxt->left_smm = true;
+
 	return X86EMUL_CONTINUE;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index af256b7..0c5a599 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5357,6 +5357,18 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
+static int svm_post_leave_smm(struct kvm_vcpu *vcpu)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -5467,6 +5479,9 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
+
+	.prep_enter_smm = svm_prep_enter_smm,
+	.post_leave_smm = svm_post_leave_smm,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6ef294..d56a528 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11629,6 +11629,18 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
+static int vmx_post_leave_smm(struct kvm_vcpu *vcpu)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -11754,6 +11766,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 #endif
 
 	.setup_mce = vmx_setup_mce,
+
+	.prep_enter_smm = vmx_prep_enter_smm,
+	.post_leave_smm = vmx_post_leave_smm,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 272320e..21ade70 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5674,6 +5674,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		ctxt->have_exception = false;
 		ctxt->exception.vector = -1;
 		ctxt->perm_ok = false;
+		ctxt->left_smm = false;
 
 		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
 
@@ -5755,6 +5756,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
+		if (r == EMULATE_DONE && ctxt->left_smm)
+			kvm_x86_ops->post_leave_smm(vcpu);
 		if (r == EMULATE_DONE &&
 		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
 			kvm_vcpu_do_singlestep(vcpu, &r);
@@ -6614,6 +6617,9 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
+
+	kvm_x86_ops->prep_enter_smm(vcpu, buf);
+
 	if (guest_cpuid_has_longmode(vcpu))
 		enter_smm_save_state_64(vcpu, buf);
 	else