[for-4.13,v4,3/3] x86/vioapic: sync PIR to IRR when modifying entries

Message ID: 20191113155940.81837-4-roger.pau@citrix.com
Series: x86/passthrough: fix interrupt migration when using posting

Commit Message

Roger Pau Monné Nov. 13, 2019, 3:59 p.m. UTC
If posted interrupts are being used, sync PIR to IRR when an unmasked
vIO-APIC entry is modified. Do this in order to prevent vectors from
being set in the IRR after a change to a vIO-APIC entry has been
performed.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/hvm/vioapic.c | 46 +++++++++++++++++++++++++++++++++-----
 1 file changed, 41 insertions(+), 5 deletions(-)

Comments

Jan Beulich Nov. 14, 2019, 1:49 p.m. UTC | #1
On 13.11.2019 16:59, Roger Pau Monne wrote:
> --- a/xen/arch/x86/hvm/vioapic.c
> +++ b/xen/arch/x86/hvm/vioapic.c
> @@ -212,6 +212,44 @@ static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig,
>      return ret;
>  }
>  
> +static inline int pit_channel0_enabled(void)
> +{
> +    return pt_active(&current->domain->arch.vpit.pt0);
> +}

Rather than moving it up here, could I talk you into taking the
opportunity and moving it into hvm/vpt.h? This really isn't a
vIO-APIC function, and hence should never have been placed in
this file.
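
A minimal sketch of that move (the placement inside hvm/vpt.h is an
assumption; pt_active() is already available there, but the header
would also need visibility of 'current' and struct domain):

/* xen/include/asm-x86/hvm/vpt.h (sketch) */
static inline int pit_channel0_enabled(void)
{
    return pt_active(&current->domain->arch.vpit.pt0);
}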

> +static void sync_vcpus_pir(struct domain *d, union vioapic_redir_entry *ent,

const (twice)?
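
I.e. presumably (a sketch; whether hvm_intr_get_dests() and
domain_sync_vlapic_pir() can accept const pointers is an assumption):

static void sync_vcpus_pir(const struct domain *d,
                           const union vioapic_redir_entry *ent,
                           unsigned int irq)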

> +                           unsigned int irq)
> +{
> +    DECLARE_BITMAP(vcpus, MAX_VIRT_CPUS) = { };

Same comment here as for patch 2.

> +    switch ( ent->fields.delivery_mode )
> +    {
> +    case dest_LowestPrio:
> +    case dest_Fixed:
> +#ifdef IRQ0_SPECIAL_ROUTING
> +        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
> +        {
> +            __set_bit(0, vcpus);
> +            break;
> +        }
> +#endif
> +        hvm_intr_get_dests(d, ent->fields.dest_id, ent->fields.dest_mode,
> +                           ent->fields.delivery_mode, vcpus);
> +        break;
> +
> +    case dest_NMI:
> +        /* Nothing to do, NMIs are not signaled on the PIR. */
> +        break;
> +
> +    default:
> +        gdprintk(XENLOG_WARNING, "unsupported delivery mode %02u\n",
> +                 ent->fields.delivery_mode);
> +        break;
> +    }
> +
> +    domain_sync_vlapic_pir(d, vcpus);
> +}

I realize it may be less of a risk for 4.13 this way, but there's
quite a bit of logic duplication with vioapic_deliver(), which it
would be nice to take care of by breaking out the shared logic
into one or more helpers.
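
One hypothetical shape for such a helper (the name
vioapic_get_dest_vcpus() and its exact parameters are illustrative,
not taken from the patch), computing the destination vCPU set once
for both sync_vcpus_pir() and vioapic_deliver():

static void vioapic_get_dest_vcpus(const struct domain *d,
                                   const union vioapic_redir_entry *ent,
                                   unsigned int irq, unsigned long *vcpus)
{
#ifdef IRQ0_SPECIAL_ROUTING
    /* PIT channel 0 is special-cased to vCPU 0, as in the patch above. */
    if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
    {
        __set_bit(0, vcpus);
        return;
    }
#endif
    hvm_intr_get_dests(d, ent->fields.dest_id, ent->fields.dest_mode,
                       ent->fields.delivery_mode, vcpus);
}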

> @@ -235,6 +273,9 @@ static void vioapic_write_redirent(
>      pent = &vioapic->redirtbl[idx];
>      ent  = *pent;
>  
> +    if ( !ent.fields.mask && hvm_funcs.deliver_posted_intr )
> +        sync_vcpus_pir(d, pent, vioapic->base_gsi + idx);

Just like for MSI, don't you want to do this _after_ having
updated everything? It may not matter much because this is
within a region with the necessary lock held, but it would at
least look better from an abstract pov.
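
In other words, something along these lines (a sketch only; the
'prev' snapshot name is illustrative, and the elided middle is the
function's existing update logic):

    union vioapic_redir_entry prev = *pent;

    /* ... existing code updating *pent from val ... */

    /* Sync PIR for the old routing only after the new entry is in place. */
    if ( !prev.fields.mask && hvm_funcs.deliver_posted_intr )
        sync_vcpus_pir(d, &prev, vioapic->base_gsi + idx);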

Jan

Patch

diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 9aeef32a14..90e6d1c4e6 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -212,6 +212,44 @@  static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig,
     return ret;
 }
 
+static inline int pit_channel0_enabled(void)
+{
+    return pt_active(&current->domain->arch.vpit.pt0);
+}
+
+static void sync_vcpus_pir(struct domain *d, union vioapic_redir_entry *ent,
+                           unsigned int irq)
+{
+    DECLARE_BITMAP(vcpus, MAX_VIRT_CPUS) = { };
+
+    switch ( ent->fields.delivery_mode )
+    {
+    case dest_LowestPrio:
+    case dest_Fixed:
+#ifdef IRQ0_SPECIAL_ROUTING
+        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
+        {
+            __set_bit(0, vcpus);
+            break;
+        }
+#endif
+        hvm_intr_get_dests(d, ent->fields.dest_id, ent->fields.dest_mode,
+                           ent->fields.delivery_mode, vcpus);
+        break;
+
+    case dest_NMI:
+        /* Nothing to do, NMIs are not signaled on the PIR. */
+        break;
+
+    default:
+        gdprintk(XENLOG_WARNING, "unsupported delivery mode %02u\n",
+                 ent->fields.delivery_mode);
+        break;
+    }
+
+    domain_sync_vlapic_pir(d, vcpus);
+}
+
 static void vioapic_write_redirent(
     struct hvm_vioapic *vioapic, unsigned int idx,
     int top_word, uint32_t val)
@@ -235,6 +273,9 @@  static void vioapic_write_redirent(
     pent = &vioapic->redirtbl[idx];
     ent  = *pent;
 
+    if ( !ent.fields.mask && hvm_funcs.deliver_posted_intr )
+        sync_vcpus_pir(d, pent, vioapic->base_gsi + idx);
+
     if ( top_word )
     {
         /* Contains only the dest_id. */
@@ -391,11 +432,6 @@  static void ioapic_inj_irq(
     vlapic_set_irq(target, vector, trig_mode);
 }
 
-static inline int pit_channel0_enabled(void)
-{
-    return pt_active(&current->domain->arch.vpit.pt0);
-}
-
 static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
 {
     uint16_t dest = vioapic->redirtbl[pin].fields.dest_id;