diff mbox series

[for-4.14,2/2] x86/passthrough: introduce a flag for GSIs not requiring an EOI or unmask

Message ID 20200610115103.7592-3-roger.pau@citrix.com (mailing list archive)
State Superseded
Headers show
Series x86/passthrough: fixes for PVH dom0 edge triggered interrupts | expand

Commit Message

Roger Pau Monné June 10, 2020, 11:51 a.m. UTC
There's no need to setup a timer for GSIs that are edge triggered,
since those don't require any EOI or unmask, and hence couldn't block
other interrupts.

Note this is only used by PVH dom0, which can set up the passthrough of
edge triggered interrupts from the vIO-APIC. One example of such an
interrupt that can be used by a PVH dom0 would be the RTC timer.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/drivers/passthrough/io.c  | 14 +++++++++++++-
 xen/include/asm-x86/hvm/irq.h |  2 ++
 2 files changed, 15 insertions(+), 1 deletion(-)

Comments

Andrew Cooper June 10, 2020, 12:37 p.m. UTC | #1
On 10/06/2020 12:51, Roger Pau Monne wrote:
> @@ -920,6 +927,11 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
>          if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
>          {
>              hvm_gsi_assert(d, pirq->pirq);
> +            if ( pirq_dpci->flags & HVM_IRQ_DPCI_NO_EOI )
> +            {
> +                spin_unlock(&d->event_lock);
> +                return;
> +            }

Urgh.  Could I possibly talk you into fixing hvm_dirq_assist() to have a
"goto out;" and a single unlock path ?  (How far are you expecting this
to be backported?)

I'm also totally unconvinced that the atomic test_and_clear() needs to
be done with the event lock held (it should either be non-atomic, or the
locking should be inside the if() condition), but that is probably not a
can of worms wanting opening right now...

~Andrew
Roger Pau Monné June 10, 2020, 12:46 p.m. UTC | #2
On Wed, Jun 10, 2020 at 01:37:15PM +0100, Andrew Cooper wrote:
> On 10/06/2020 12:51, Roger Pau Monne wrote:
> > @@ -920,6 +927,11 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
> >          if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
> >          {
> >              hvm_gsi_assert(d, pirq->pirq);
> > +            if ( pirq_dpci->flags & HVM_IRQ_DPCI_NO_EOI )
> > +            {
> > +                spin_unlock(&d->event_lock);
> > +                return;
> > +            }
> 
> Urgh.  Could I possibly talk you into fixing hvm_dirq_assist() to have a
> "goto out;" and a single unlock path ?  (How far are you expecting this
> to be backported?)

I was very tempted to go that way but didn't want to introduce more
churn. Since you agree I will do so.

> I'm also totally unconvinced that the atomic test_and_clear() needs to
> be done with the event lock held (it should either be non-atomic, or the
> locking should be inside the if() condition), but that is probably not a
> can of worms wanting opening right now...

There's some reasoning about all this in 104072fc1c7e6ed. I also think
naming it masked is confusing, since the underlying interrupt might
not be masked. Anyway, this seems like something I don't really want
to get into now, as it seems quite fragile.

Thanks, Roger.
diff mbox series

Patch

diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index b292e79382..be1d5b1434 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -138,7 +138,8 @@  static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
 
 bool pt_irq_need_timer(uint32_t flags)
 {
-    return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
+    return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE |
+                      HVM_IRQ_DPCI_NO_EOI));
 }
 
 static int pt_irq_guest_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
@@ -558,6 +559,12 @@  int pt_irq_create_bind(
                      */
                     ASSERT(!mask);
                     share = trigger_mode;
+                    if ( !trigger_mode )
+                        /*
+                         * Edge IO-APIC interrupt, no EOI or unmask to perform
+                         * and hence no timer needed.
+                         */
+                        pirq_dpci->flags |= HVM_IRQ_DPCI_NO_EOI;
                 }
             }
 
@@ -920,6 +927,11 @@  static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
         if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
         {
             hvm_gsi_assert(d, pirq->pirq);
+            if ( pirq_dpci->flags & HVM_IRQ_DPCI_NO_EOI )
+            {
+                spin_unlock(&d->event_lock);
+                return;
+            }
             pirq_dpci->pending++;
         }
 
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index d306cfeade..532880d497 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -121,6 +121,7 @@  struct dev_intx_gsi_link {
 #define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT           4
 #define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT           5
 #define _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT        6
+#define _HVM_IRQ_DPCI_NO_EOI_SHIFT              7
 #define _HVM_IRQ_DPCI_TRANSLATE_SHIFT          15
 #define HVM_IRQ_DPCI_MACH_PCI        (1u << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
 #define HVM_IRQ_DPCI_MACH_MSI        (1u << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
@@ -129,6 +130,7 @@  struct dev_intx_gsi_link {
 #define HVM_IRQ_DPCI_GUEST_PCI       (1u << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
 #define HVM_IRQ_DPCI_GUEST_MSI       (1u << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
 #define HVM_IRQ_DPCI_IDENTITY_GSI    (1u << _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT)
+#define HVM_IRQ_DPCI_NO_EOI          (1u << _HVM_IRQ_DPCI_NO_EOI_SHIFT)
 #define HVM_IRQ_DPCI_TRANSLATE       (1u << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
 
 struct hvm_gmsi_info {