@@ -6162,6 +6162,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
+ kvm_before_interrupt(vcpu);
local_irq_enable();
/*
* We must have an instruction with interrupts enabled, so
@@ -6169,6 +6170,7 @@ static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
*/
asm("nop");
local_irq_disable();
+ kvm_after_interrupt(vcpu);
}
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
@@ -6142,6 +6142,8 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
vector = intr_info & INTR_INFO_VECTOR_MASK;
entry = gate_offset((gate_desc *)host_idt_base + vector);
+ kvm_before_interrupt(vcpu);
+
asm volatile(
#ifdef CONFIG_X86_64
"mov %%" _ASM_SP ", %[sp]\n\t"
@@ -6162,6 +6164,8 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
+
+ kvm_after_interrupt(vcpu);
}
STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
@@ -7945,9 +7945,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- kvm_before_interrupt(vcpu);
kvm_x86_ops->handle_external_intr(vcpu);
- kvm_after_interrupt(vcpu);
++vcpu->stat.exits;
VMX can conditionally call kvm_{before,after}_interrupt() since KVM
always uses "ack interrupt on exit" and therefore explicitly handles
interrupts as opposed to blindly enabling irqs.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/svm.c     | 2 ++
 arch/x86/kvm/vmx/vmx.c | 4 ++++
 arch/x86/kvm/x86.c     | 2 --
 3 files changed, 6 insertions(+), 2 deletions(-)