[v3,3/5] evtchn: convert vIRQ lock to an r/w one

Message ID d2461bd6-fb2f-447f-11c6-bd8afd573d7b@suse.com
State Superseded
Series evtchn: (not so) recent XSAs follow-on

Commit Message

Jan Beulich Nov. 23, 2020, 1:28 p.m. UTC
There's no need to serialize all sending of vIRQ-s; all that's needed
is serialization against the closing of the respective event channels
(so far by means of a barrier). To facilitate the conversion, switch to
an ordinary write-locked region in evtchn_close().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Re-base over added new earlier patch.
v2: Don't introduce/use rw_barrier() here. Add comment to
    evtchn_bind_virq(). Re-base.
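
To make the resulting pattern concrete, below is a minimal, self-contained
userspace analogue using POSIX rwlocks in place of Xen's rwlock_t;
send_virq(), close_virq(), and the printf() standing in for
evtchn_port_set_pending() are hypothetical names, a sketch of the pattern
rather than Xen code. Concurrent senders share the read lock, while the
close path takes the write lock, excluding all in-flight senders just as
the former spin_barrier() did:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_VIRQS 24

static uint16_t virq_to_evtchn[NR_VIRQS];
static pthread_rwlock_t virq_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Sender path: many senders may hold the read lock concurrently. */
static void send_virq(unsigned int virq)
{
    uint16_t port;

    pthread_rwlock_rdlock(&virq_lock);

    port = virq_to_evtchn[virq];
    if ( port )  /* stands in for evtchn_port_set_pending() */
        printf("set pending on port %u\n", (unsigned int)port);

    pthread_rwlock_unlock(&virq_lock);
}

/* Close path: the write lock excludes all senders for the duration of
 * the update, which is what the spin_barrier() used to guarantee. */
static void close_virq(unsigned int virq, uint16_t port)
{
    pthread_rwlock_wrlock(&virq_lock);

    if ( virq_to_evtchn[virq] == port )
        virq_to_evtchn[virq] = 0;

    pthread_rwlock_unlock(&virq_lock);
}

int main(void)
{
    virq_to_evtchn[1] = 5;  /* bind: VIRQ 1 -> port 5 */
    send_virq(1);           /* delivered: mapping still valid */
    close_virq(1, 5);       /* invalidate under the write lock */
    send_virq(1);           /* silently dropped: port is now 0 */
    return 0;
}

Note that the close path now checks the binding under the lock instead of
checking first and issuing a barrier afterwards, which is exactly the
shape of the evtchn_close() hunk below.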

Comments

Julien Grall Dec. 9, 2020, 11:16 a.m. UTC | #1
Hi Jan,

On 23/11/2020 13:28, Jan Beulich wrote:
> There's no need to serialize all sending of vIRQ-s; all that's needed
> is serialization against the closing of the respective event channels
> (so far by means of a barrier). To facilitate the conversion, switch to
> an ordinary write-locked region in evtchn_close().
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Julien Grall <jgrall@amazon.com>

Cheers,

Patch

--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -160,7 +160,7 @@  struct vcpu *vcpu_create(struct domain *
     v->vcpu_id = vcpu_id;
     v->dirty_cpu = VCPU_CPU_CLEAN;
 
-    spin_lock_init(&v->virq_lock);
+    rwlock_init(&v->virq_lock);
 
     tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
 
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -475,6 +475,13 @@  int evtchn_bind_virq(evtchn_bind_virq_t
     evtchn_write_unlock(chn);
 
     bind->port = port;
+    /*
+     * If anything, the update of virq_to_evtchn[] would need guarding by
+     * virq_lock, but since this is the last action here, there's no strict
+     * need to acquire the lock. Hence holding event_lock isn't helpful
+     * anymore at this point, but its unlocking usefully acts as the
+     * otherwise necessary smp_wmb() here.
+     */
     write_atomic(&v->virq_to_evtchn[virq], port);
 
  out:
@@ -661,10 +668,12 @@  int evtchn_close(struct domain *d1, int
     case ECS_VIRQ:
         for_each_vcpu ( d1, v )
         {
-            if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) != port1 )
-                continue;
-            write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
-            spin_barrier(&v->virq_lock);
+            unsigned long flags;
+
+            write_lock_irqsave(&v->virq_lock, flags);
+            if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) == port1 )
+                write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
+            write_unlock_irqrestore(&v->virq_lock, flags);
         }
         break;
 
@@ -813,7 +822,7 @@  void send_guest_vcpu_virq(struct vcpu *v
 
     ASSERT(!virq_is_global(virq));
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock_irqsave(&v->virq_lock, flags);
 
     port = read_atomic(&v->virq_to_evtchn[virq]);
     if ( unlikely(port == 0) )
@@ -823,7 +832,7 @@  void send_guest_vcpu_virq(struct vcpu *v
     evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_global_virq(struct domain *d, uint32_t virq)
@@ -842,7 +851,7 @@  void send_guest_global_virq(struct domai
     if ( unlikely(v == NULL) )
         return;
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock_irqsave(&v->virq_lock, flags);
 
     port = read_atomic(&v->virq_to_evtchn[virq]);
     if ( unlikely(port == 0) )
@@ -852,7 +861,7 @@  void send_guest_global_virq(struct domai
     evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_pirq(struct domain *d, const struct pirq *pirq)
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -238,7 +238,7 @@  struct vcpu
 
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
-    spinlock_t       virq_lock;
+    rwlock_t         virq_lock;
 
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
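
The comment added to evtchn_bind_virq() reasons that the port should only
become visible after the channel it names is fully set up, and that the
barrier implied by unlocking makes an explicit smp_wmb() unnecessary. As a
standalone illustration of that publish pattern, here is a C11-atomics
sketch in which a release store plays the role the comment assigns to the
unlock; the struct and function names are hypothetical:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical stand-ins for the channel state and the port array slot. */
struct demo_evtchn {
    int state;                      /* e.g. ECS_VIRQ once bound */
};

static struct demo_evtchn chan;
static _Atomic uint16_t virq_port;

/* Binder: set up the channel, then publish the port. The release store
 * orders the channel initialisation before the port becomes visible,
 * the role the comment attributes to the unlock's implied barrier. */
static void bind_publish(uint16_t port)
{
    chan.state = 1;
    atomic_store_explicit(&virq_port, port, memory_order_release);
}

/* Sender: the acquire load pairs with the release store, so a non-zero
 * port implies the channel state written before it is visible too. */
static int send(void)
{
    uint16_t port = atomic_load_explicit(&virq_port, memory_order_acquire);

    return port ? chan.state : 0;
}

int main(void)
{
    bind_publish(5);
    return send() == 1 ? 0 : 1;
}

The acquire load here stands in for the sender's read_atomic() of
virq_to_evtchn[] performed under the (now read-)locked virq_lock.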