
[v2] KVM: x86/xen: Inject vCPU upcall vector when local APIC is enabled

Message ID 884c08981d44f420f2a543276141563d07464f9b.camel@infradead.org (mailing list archive)
State New, archived

Commit Message

David Woodhouse Dec. 14, 2023, 4:56 p.m. UTC
From: David Woodhouse <dwmw@amazon.co.uk>

Linux guests since commit b1c3497e604d ("x86/xen: Add support for
HVMOP_set_evtchn_upcall_vector") in v6.0 onwards will use the per-vCPU
upcall vector when it's advertised in the Xen CPUID leaves.
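
For context, the guest side added by that commit registers the vector with a
hypercall along these lines (a paraphrased sketch of the code in
arch/x86/xen/enlighten_hvm.c, not the verbatim upstream code):

/*
 * Sketch: register a per-vCPU upcall vector for 'cpu' via the
 * HVMOP_set_evtchn_upcall_vector hypercall. Error handling and the
 * fallback to the legacy vector callback are elided.
 */
static int xen_set_upcall_vector(unsigned int cpu)
{
        xen_hvm_evtchn_upcall_vector_t op = {
                .vector = HYPERVISOR_CALLBACK_VECTOR,
                .vcpu   = per_cpu(xen_vcpu_id, cpu),
        };

        return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
}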

This upcall is injected through the local APIC as an MSI, unlike the
older system vector, which was merely injected by the hypervisor any time
the CPU was able to receive an interrupt and the evtchn_upcall_pending
flag is set in its vcpu_info.

Effectively, that makes the per-vCPU upcall edge-triggered instead of
level-triggered.

We lose edges.

Specifically, when the local APIC is *disabled*, delivering the MSI
will fail. Xen checks the vcpu_info->evtchn_upcall_pending flag when
enabling the local APIC for a vCPU and injects the vector immediately
if so.

Since userspace doesn't get to notice when the guest enables a local
APIC which is emulated in KVM, KVM needs to do the same.

Astute reviewers may note that the kvm_xen_inject_vcpu_vector() function has
a WARN_ON_ONCE() in the case where kvm_irq_delivery_to_apic_fast() fails
and returns false. In the case where the MSI is not delivered due to the
local APIC being disabled, kvm_irq_delivery_to_apic_fast() still returns
true but the value in *r is zero. So the WARN_ON_ONCE() remains correct,
as that case should still never happen.
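
For reference, the injection helper in question looks roughly like this
before this patch (paraphrased from arch/x86/kvm/xen.c; treat it as a
sketch rather than the exact upstream code -- the patch below only drops
the 'static'):

static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
        struct kvm_lapic_irq irq = { };
        int r;

        irq.dest_id = v->vcpu_id;
        irq.vector = v->arch.xen.upcall_vector;
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.shorthand = APIC_DEST_NOSHORT;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.level = 1;

        /*
         * Fixed-mode physical unicast is always handled by the "fast"
         * path, so a false return would indicate a bug. A disabled local
         * APIC is not that case: the call still returns true, with *r == 0.
         */
        WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}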

Fixes: fde0451be8fb3 ("KVM: x86/xen: Support per-vCPU event channel upcall via local APIC")
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
v2:
 • Add Fixes: tag.


 arch/x86/kvm/lapic.c |  5 ++++-
 arch/x86/kvm/xen.c   |  2 +-
 arch/x86/kvm/xen.h   | 18 ++++++++++++++++++
 3 files changed, 23 insertions(+), 2 deletions(-)

Comments

Paul Durrant Dec. 15, 2023, 9:10 a.m. UTC | #1
On 14/12/2023 16:56, David Woodhouse wrote:
> [snip]

Reviewed-by: Paul Durrant <paul@xen.org>

Patch

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 245b20973cae..7eeff9dd1e5a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -41,6 +41,7 @@ 
 #include "ioapic.h"
 #include "trace.h"
 #include "x86.h"
+#include "xen.h"
 #include "cpuid.h"
 #include "hyperv.h"
 #include "smm.h"
@@ -499,8 +500,10 @@  static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
        }
 
        /* Check if there are APF page ready requests pending */
-       if (enabled)
+       if (enabled) {
                kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+               kvm_xen_enable_lapic(apic->vcpu);
+       }
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 5c19957c9aeb..6667f01170f9 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -483,7 +483,7 @@  void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
                kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
 }
 
-static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
 {
        struct kvm_lapic_irq irq = { };
        int r;
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index f8f1fe22d090..8eba3943b246 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -18,6 +18,7 @@  extern struct static_key_false_deferred kvm_xen_enabled;
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
 void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
+void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
 int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
 int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
@@ -36,6 +37,19 @@  int kvm_xen_setup_evtchn(struct kvm *kvm,
                         const struct kvm_irq_routing_entry *ue);
 void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);
 
+static inline void kvm_xen_enable_lapic(struct kvm_vcpu *vcpu)
+{
+       /*
+        * The local APIC is being enabled. If the per-vCPU upcall vector is
+        * set and the vCPU's evtchn_upcall_pending flag is set, inject the
+        * interrupt.
+        */
+       if (static_branch_unlikely(&kvm_xen_enabled.key) &&
+           vcpu->arch.xen.vcpu_info_cache.active &&
+           vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
+               kvm_xen_inject_vcpu_vector(vcpu);
+}
+
 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
 {
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
@@ -101,6 +115,10 @@  static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 {
 }
 
+static inline void kvm_xen_enable_lapic(struct kvm_vcpu *vcpu)
+{
+}
+
 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
 {
        return false;