diff mbox

X86/VMX: Disable VMX preemption timer if MWAIT is not intercepted

Message ID 1523350211-5747-1-git-send-email-karahmed@amazon.de (mailing list archive)
State New, archived
Headers show

Commit Message

KarimAllah Ahmed April 10, 2018, 8:50 a.m. UTC
The VMX-preemption timer is used by KVM as a way to set deadlines for the
guest (i.e. timer emulation). That was safe till very recently when
capability KVM_X86_DISABLE_EXITS_MWAIT to disable intercepting MWAIT was
introduced. According to Intel SDM 25.5.1:

"""
The VMX-preemption timer operates in the C-states C0, C1, and C2; it also
operates in the shutdown and wait-for-SIPI states. If the timer counts down
to zero in any state other than the wait-for-SIPI state, the logical
processor transitions to the C0 C-state and causes a VM exit; the timer
does not cause a VM exit if it counts down to zero in the wait-for-SIPI
state. The timer is not decremented in C-states deeper than C2.
"""

Now once the guest issues the MWAIT with a C-state deeper than
C2 the preemption timer will never wake it up again since it stopped
ticking! Usually this is compensated by other activities in the system that
would wake the core from the deep C-state (and cause a VMExit). For
example, if the host itself is ticking or it received interrupts, etc!

So disable the VMX-preemption timer if MWAIT is exposed to the guest!

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/lapic.c            |  3 ++-
 arch/x86/kvm/vmx.c              | 11 +++++++++--
 3 files changed, 12 insertions(+), 3 deletions(-)

Comments

Paolo Bonzini April 10, 2018, 9:04 a.m. UTC | #1
On 10/04/2018 10:50, KarimAllah Ahmed wrote:
>  	WARN_ON(preemptible());
> -	if (!kvm_x86_ops->set_hv_timer)
> +	if (!kvm_x86_ops->has_hv_timer ||
> +	    !kvm_x86_ops->has_hv_timer(apic->vcpu))
>  		return false;
>  
>  	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))

Why not just return -ENOTSUP from vmx_set_hv_timer?

Thanks,

Paolo
KarimAllah Ahmed April 10, 2018, 10:09 a.m. UTC | #2
On Tue, 2018-04-10 at 11:04 +0200, Paolo Bonzini wrote:
> On 10/04/2018 10:50, KarimAllah Ahmed wrote:

> > 

> >  	WARN_ON(preemptible());

> > -	if (!kvm_x86_ops->set_hv_timer)

> > +	if (!kvm_x86_ops->has_hv_timer ||

> > +	    !kvm_x86_ops->has_hv_timer(apic->vcpu))

> >  		return false;

> >  

> >  	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))

> 

> Why not just return -ENOTSUP from vmx_set_hv_timer?


hehe .. good point :)

I just sent v2!

> 

> Thanks,

> 

> Paolo

> 

Amazon Development Center Germany GmbH
Berlin - Dresden - Aachen
main office: Krausenstr. 38, 10117 Berlin
Geschaeftsfuehrer: Dr. Ralf Herbrich, Christian Schlaeger
Ust-ID: DE289237879
Eingetragen am Amtsgericht Charlottenburg HRB 149173 B
diff mbox

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 97448f1..5d9da9c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1090,6 +1090,7 @@  struct kvm_x86_ops {
 			      uint32_t guest_irq, bool set);
 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
 
+	bool (*has_hv_timer)(struct kvm_vcpu *vcpu);
 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a071dc1..9fb50e6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1561,7 +1561,8 @@  static bool start_hv_timer(struct kvm_lapic *apic)
 	int r;
 
 	WARN_ON(preemptible());
-	if (!kvm_x86_ops->set_hv_timer)
+	if (!kvm_x86_ops->has_hv_timer ||
+	    !kvm_x86_ops->has_hv_timer(apic->vcpu))
 		return false;
 
 	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d2e54e7..d99a823 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7112,7 +7112,7 @@  static __init int hardware_setup(void)
 		cpu_preemption_timer_multi =
 			 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
 	} else {
-		kvm_x86_ops->set_hv_timer = NULL;
+		kvm_x86_ops->has_hv_timer = NULL;
 		kvm_x86_ops->cancel_hv_timer = NULL;
 	}
 
@@ -11901,6 +11901,11 @@  static inline int u64_shl_div_u64(u64 a, unsigned int shift,
 	return 0;
 }
 
+static bool vmx_has_hv_timer(struct kvm_vcpu *vcpu)
+{
+	return !kvm_pause_in_guest(vcpu->kvm);
+}
+
 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -12136,7 +12141,8 @@  static void pi_post_block(struct kvm_vcpu *vcpu)
 
 static void vmx_post_block(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops->set_hv_timer)
+	if (kvm_x86_ops->has_hv_timer &&
+	    kvm_x86_ops->has_hv_timer(vcpu))
 		kvm_lapic_switch_to_hv_timer(vcpu);
 
 	pi_post_block(vcpu);
@@ -12592,6 +12598,7 @@  static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.update_pi_irte = vmx_update_pi_irte,
 
 #ifdef CONFIG_X86_64
+	.has_hv_timer = vmx_has_hv_timer,
 	.set_hv_timer = vmx_set_hv_timer,
 	.cancel_hv_timer = vmx_cancel_hv_timer,
 #endif