[v4,03/13] x86/IRQ: desc->affinity should strictly represent the requested value
diff mbox series

Message ID 0862ea72-a405-ef5a-d309-eea3ee918969@suse.com
State New
Headers show
Series
  • [v4,01/13] x86/IRQ: deal with move-in-progress state in fixup_irqs()
Related show

Commit Message

Jan Beulich July 16, 2019, 7:38 a.m. UTC
desc->arch.cpu_mask reflects the actual set of target CPUs. Don't ever
fiddle with desc->affinity itself, except to store caller requested
values. Note that assign_irq_vector() now takes a NULL incoming CPU mask
to mean "all CPUs", rather than just "all currently online CPUs".
This way no further affinity adjustment is needed after onlining further
CPUs.

This renders both set_native_irq_info() uses (which weren't using proper
locking anyway) redundant - drop the function altogether.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
---
v4: Use %*pbl.
---
TBD: To reduce the bad effect on the so far tabular output of the 'i'
      debug key, shifting the two affinity values further to the right
      may be worthwhile to consider.

Comments

Andrew Cooper July 19, 2019, 1:25 p.m. UTC | #1
On 16/07/2019 08:38, Jan Beulich wrote:
> desc->arch.cpu_mask reflects the actual set of target CPUs. Don't ever
> fiddle with desc->affinity itself, except to store caller requested
> values. Note that assign_irq_vector() now takes a NULL incoming CPU mask
> to mean "all CPUs" now, rather than just "all currently online CPUs".
> This way no further affinity adjustment is needed after onlining further
> CPUs.
>
> This renders both set_native_irq_info() uses (which weren't using proper
> locking anyway) redundant - drop the function altogether.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

There are utf8 encoding problems here, but the patch in 0/$N does look
to be ok.

Patch
diff mbox series

--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1039,7 +1039,6 @@  static void __init setup_IO_APIC_irqs(vo
              SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS));
              spin_lock_irqsave(&ioapic_lock, flags);
              __ioapic_write_entry(apic, pin, 0, entry);
-            set_native_irq_info(irq, TARGET_CPUS);
              spin_unlock_irqrestore(&ioapic_lock, flags);
          }
      }
@@ -2248,7 +2247,6 @@  int io_apic_set_pci_routing (int ioapic,
  
      spin_lock_irqsave(&ioapic_lock, flags);
      __ioapic_write_entry(ioapic, pin, 0, entry);
-    set_native_irq_info(irq, TARGET_CPUS);
      spin_unlock(&ioapic_lock);
  
      spin_lock(&desc->lock);
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -589,11 +589,16 @@  int assign_irq_vector(int irq, const cpu
  
      spin_lock_irqsave(&vector_lock, flags);
      ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS);
-    if (!ret) {
+    if ( !ret )
+    {
          ret = desc->arch.vector;
-        cpumask_copy(desc->affinity, desc->arch.cpu_mask);
+        if ( mask )
+            cpumask_copy(desc->affinity, mask);
+        else
+            cpumask_setall(desc->affinity);
      }
      spin_unlock_irqrestore(&vector_lock, flags);
+
      return ret;
  }
  
@@ -2345,9 +2350,10 @@  static void dump_irqs(unsigned char key)
  
          spin_lock_irqsave(&desc->lock, flags);
  
-        printk("   IRQ:%4d aff:%*pb vec:%02x %-15s status=%03x ",
-               irq, nr_cpu_ids, cpumask_bits(desc->affinity), desc->arch.vector,
-               desc->handler->typename, desc->status);
+        printk("   IRQ:%4d aff:{%*pbl}/{%*pbl} vec:%02x %-15s status=%03x ",
+               irq, nr_cpu_ids, cpumask_bits(desc->affinity),
+               nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+               desc->arch.vector, desc->handler->typename, desc->status);
  
          if ( ssid )
              printk("Z=%-25s ", ssid);
@@ -2435,8 +2441,7 @@  void fixup_irqs(const cpumask_t *mask, b
                  release_old_vec(desc);
          }
  
-        cpumask_copy(&affinity, desc->affinity);
-        if ( !desc->action || cpumask_subset(&affinity, mask) )
+        if ( !desc->action || cpumask_subset(desc->affinity, mask) )
          {
              spin_unlock(&desc->lock);
              continue;
@@ -2469,12 +2474,13 @@  void fixup_irqs(const cpumask_t *mask, b
              desc->arch.move_in_progress = 0;
          }
  
-        cpumask_and(&affinity, &affinity, mask);
-        if ( cpumask_empty(&affinity) )
+        if ( !cpumask_intersects(mask, desc->affinity) )
          {
              break_affinity = true;
-            cpumask_copy(&affinity, mask);
+            cpumask_setall(&affinity);
          }
+        else
+            cpumask_copy(&affinity, desc->affinity);
  
          if ( desc->handler->disable )
              desc->handler->disable(desc);
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -162,11 +162,6 @@  extern irq_desc_t *domain_spin_lock_irq_
  extern irq_desc_t *pirq_spin_lock_irq_desc(
      const struct pirq *, unsigned long *pflags);
  
-static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
-{
-    cpumask_copy(irq_to_desc(irq)->affinity, mask);
-}
-
  unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);
  
  #ifndef arch_hwdom_irqs