x86/IRQ: don't keep EOI timer running without need

Message ID 5CC2EE8E02000078002296B6@prv1-mh.provo.novell.com (mailing list archive)
State New, archived
Series x86/IRQ: don't keep EOI timer running without need

Commit Message

Jan Beulich April 26, 2019, 11:42 a.m. UTC
The timer needs to remain active only until all pending IRQ instances
have seen EOIs from their respective domains. Stop it in
desc_guest_eoi() once the in-flight count has reached zero. Note that
this is race-free (with __do_IRQ_guest()), as the IRQ descriptor lock
is held at that point.
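
A minimal sketch of the idea (field names and the surrounding structure
are approximations, not the literal Xen function; the desc_guest_eoi()
hunk below shows the real change):

    /* Sketch only: caller enters with desc->lock held. */
    void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
    {
        irq_guest_action_t *action = (irq_guest_action_t *)desc->action;

        if ( --action->in_flight != 0 )
        {
            /* Other instances still await their EOIs - keep the timer. */
            spin_unlock_irq(&desc->lock);
            return;
        }

        /*
         * All pending instances have now been EOIed, so the safety-net
         * timer has nothing left to do.  Stopping it here cannot race
         * with __do_IRQ_guest() re-arming it, as that path also runs
         * with desc->lock held.
         */
        stop_timer(&action->eoi_timer);

        /* ... ACKTYPE_UNMASK / ACKTYPE_EOI processing and unlock ... */
    }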

Also pull up stopping of the timer in __do_IRQ_guest() itself: instead
of stopping it immediately before re-setting it, stop it as soon as
we've made it past any early returns from the function (and hence we're
sure it'll get set again).
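
The resulting shape of __do_IRQ_guest() is roughly as follows
(heavily simplified; the real hunks appear below):

    /* Sketch only: runs with desc->lock held. */
    static void __do_IRQ_guest(int irq)
    {
        struct irq_desc    *desc = irq_to_desc(irq);
        irq_guest_action_t *action = (irq_guest_action_t *)desc->action;

        /* ... early returns (no guests bound, IRQ being torn down, ...) ... */

        /*
         * Past all early returns the timer is guaranteed to be re-armed
         * at the bottom of the function, so it can be stopped this early.
         */
        if ( action->ack_type != ACKTYPE_NONE )
            stop_timer(&action->eoi_timer);

        /* ... pending-EOI bookkeeping, raising the in-flight count,
         *     and injecting the IRQ into the bound guest(s) ... */

        if ( action->ack_type != ACKTYPE_NONE )
        {
            migrate_timer(&action->eoi_timer, smp_processor_id());
            set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
        }
    }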

Finally, bail from the actual timer handler in case we find the timer
already active again by the time we've managed to acquire the IRQ
descriptor lock. Without this we may forcibly EOI an IRQ immediately
after it got sent to a guest. For this, timer_is_active() gets split
out of active_timer(), deliberately moving just one of the two
ASSERT()s (to allow the function to also be used on a never-initialized
timer).
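
In the handler this boils down to the following (again a simplified
sketch of the real function patched below):

    /* Sketch only: handler-side bail-out. */
    static void irq_guest_eoi_timer_fn(void *data)
    {
        struct irq_desc    *desc = data;
        irq_guest_action_t *action;

        spin_lock_irq(&desc->lock);

        /* ... sanity checks on desc elided ... */

        action = (irq_guest_action_t *)desc->action;

        /*
         * While we were waiting for desc->lock, __do_IRQ_guest() may have
         * delivered another instance and re-armed the timer.  Forcing an
         * EOI now would be premature; the re-armed timer will get us back
         * here if the guest again fails to EOI in time.
         */
        if ( timer_is_active(&action->eoi_timer) )
            goto out;

        /* ... forced-EOI handling for the various ACKTYPE_* cases ... */

     out:
        spin_unlock_irq(&desc->lock);
    }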

Signed-off-by: Jan Beulich <jbeulich@suse.com>

Patch

--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1115,6 +1115,9 @@  static void irq_guest_eoi_timer_fn(void
 
     action = (irq_guest_action_t *)desc->action;
 
+    if ( timer_is_active(&action->eoi_timer) )
+        goto out;
+
     if ( action->ack_type != ACKTYPE_NONE )
     {
         unsigned int i;
@@ -1167,6 +1170,9 @@  static void __do_IRQ_guest(int irq)
         return;
     }
 
+    if ( action->ack_type != ACKTYPE_NONE )
+        stop_timer(&action->eoi_timer);
+
     if ( action->ack_type == ACKTYPE_EOI )
     {
         sp = pending_eoi_sp(peoi);
@@ -1194,7 +1200,6 @@  static void __do_IRQ_guest(int irq)
 
     if ( action->ack_type != ACKTYPE_NONE )
     {
-        stop_timer(&action->eoi_timer);
         migrate_timer(&action->eoi_timer, smp_processor_id());
         set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
     }
@@ -1457,6 +1462,8 @@  void desc_guest_eoi(struct irq_desc *des
         return;
     }
 
+    stop_timer(&action->eoi_timer);
+
     if ( action->ack_type == ACKTYPE_UNMASK )
     {
         ASSERT(cpumask_empty(action->cpu_eoi_map));
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -282,11 +282,10 @@  static inline void timer_unlock(struct t
 })
 
 
-static bool_t active_timer(struct timer *timer)
+static bool active_timer(const struct timer *timer)
 {
     ASSERT(timer->status >= TIMER_STATUS_inactive);
-    ASSERT(timer->status <= TIMER_STATUS_in_list);
-    return (timer->status >= TIMER_STATUS_in_heap);
+    return timer_is_active(timer);
 }
 
 
--- a/xen/include/xen/timer.h
+++ b/xen/include/xen/timer.h
@@ -75,6 +75,19 @@  bool timer_expires_before(struct timer *
 
 #define timer_is_expired(t) timer_expires_before(t, NOW())
 
+/*
+ * True if a timer is active.
+ *
+ * Unlike for timer_expires_before(), it is the caller's responsibility to
+ * use suitable locking such that the returned value isn't stale by the time
+ * it gets acted upon.
+ */
+static inline bool timer_is_active(const struct timer *timer)
+{
+    ASSERT(timer->status <= TIMER_STATUS_in_list);
+    return timer->status >= TIMER_STATUS_in_heap;
+}
+
 /* Migrate a timer to a different CPU. The timer may be currently active. */
 void migrate_timer(struct timer *timer, unsigned int new_cpu);
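
As the new comment notes, timer_is_active() does no locking of its own:
it is only meaningful while holding whatever lock serializes arming of
the timer, which in this patch is the IRQ descriptor lock. An
illustrative caller (not part of the patch) would follow the pattern:

    /* Illustrative only: hold the lock that set_timer() callers hold. */
    spin_lock_irq(&desc->lock);
    if ( !timer_is_active(&action->eoi_timer) )
        set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
    spin_unlock_irq(&desc->lock);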