@@ -160,7 +160,7 @@ struct vcpu *vcpu_create(struct domain *
v->vcpu_id = vcpu_id;
v->dirty_cpu = VCPU_CPU_CLEAN;
- spin_lock_init(&v->virq_lock);
+ rwlock_init(&v->virq_lock);
tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
@@ -475,6 +475,13 @@ int evtchn_bind_virq(evtchn_bind_virq_t
evtchn_write_unlock(chn);
bind->port = port;
+ /*
+ * If at all, the update of virq_to_evtchn[] would need guarding by
+ * virq_lock, but since this is the last action here, there's no strict
+ * need to acquire the lock. Hence holding event_lock isn't helpful
+ * anymore at this point, but its unlocking conveniently provides the
+ * otherwise necessary smp_wmb().
+ */
write_atomic(&v->virq_to_evtchn[virq], port);
out:
@@ -661,10 +668,12 @@ int evtchn_close(struct domain *d1, int
case ECS_VIRQ:
for_each_vcpu ( d1, v )
{
- if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) != port1 )
- continue;
- write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
- spin_barrier(&v->virq_lock);
+ unsigned long flags;
+
+ write_lock_irqsave(&v->virq_lock, flags);
+ if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) == port1 )
+ write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
+ write_unlock_irqrestore(&v->virq_lock, flags);
}
break;
@@ -813,7 +822,7 @@ void send_guest_vcpu_virq(struct vcpu *v
ASSERT(!virq_is_global(virq));
- spin_lock_irqsave(&v->virq_lock, flags);
+ read_lock_irqsave(&v->virq_lock, flags);
port = read_atomic(&v->virq_to_evtchn[virq]);
if ( unlikely(port == 0) )
@@ -823,7 +832,7 @@ void send_guest_vcpu_virq(struct vcpu *v
evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));
out:
- spin_unlock_irqrestore(&v->virq_lock, flags);
+ read_unlock_irqrestore(&v->virq_lock, flags);
}
void send_guest_global_virq(struct domain *d, uint32_t virq)
@@ -842,7 +851,7 @@ void send_guest_global_virq(struct domai
if ( unlikely(v == NULL) )
return;
- spin_lock_irqsave(&v->virq_lock, flags);
+ read_lock_irqsave(&v->virq_lock, flags);
port = read_atomic(&v->virq_to_evtchn[virq]);
if ( unlikely(port == 0) )
@@ -852,7 +861,7 @@ void send_guest_global_virq(struct domai
evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
out:
- spin_unlock_irqrestore(&v->virq_lock, flags);
+ read_unlock_irqrestore(&v->virq_lock, flags);
}
void send_guest_pirq(struct domain *d, const struct pirq *pirq)
@@ -238,7 +238,7 @@ struct vcpu
/* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
evtchn_port_t virq_to_evtchn[NR_VIRQS];
- spinlock_t virq_lock;
+ rwlock_t virq_lock;
/* Tasklet for continue_hypercall_on_cpu(). */
struct tasklet continue_hypercall_tasklet;
There's no need to serialize all sending of vIRQ-s; all that's needed
is serialization against the closing of the respective event channels
(so far achieved by means of a barrier). To facilitate the conversion,
switch to an ordinary write-locked region in evtchn_close().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Re-base over added new earlier patch.
v2: Don't introduce/use rw_barrier() here. Add comment to
    evtchn_bind_virq(). Re-base.
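To make the conversion concrete, below is a minimal user-space model of
the resulting locking pattern, with POSIX rwlocks and C11 atomics
standing in for Xen's rwlock_t and {read,write}_atomic(). All names
(send_virq, close_port, etc.) are hypothetical; this is a sketch of the
technique, not hypervisor code:

    /* Minimal user-space model of the rwlock conversion; POSIX rwlocks
     * and C11 atomics stand in for Xen's primitives. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_VIRQS 8

    static pthread_rwlock_t virq_lock = PTHREAD_RWLOCK_INITIALIZER;
    static atomic_uint virq_to_evtchn[NR_VIRQS];

    /* Sender path: only a read lock is needed, so concurrent senders no
     * longer serialize against one another -- only against close_port(). */
    static void send_virq(unsigned int virq)
    {
        pthread_rwlock_rdlock(&virq_lock);

        unsigned int port = atomic_load(&virq_to_evtchn[virq]);
        if ( port )
            printf("deliver vIRQ %u via port %u\n", virq, port);

        pthread_rwlock_unlock(&virq_lock);
    }

    /* Close path: acquiring the write lock guarantees no sender still
     * holds the read lock, i.e. no sender can still observe the stale
     * port -- the role spin_barrier() played with the old spinlock. */
    static void close_port(unsigned int virq, unsigned int port)
    {
        pthread_rwlock_wrlock(&virq_lock);

        if ( atomic_load(&virq_to_evtchn[virq]) == port )
            atomic_store(&virq_to_evtchn[virq], 0);

        pthread_rwlock_unlock(&virq_lock);
    }

    int main(void)
    {
        atomic_store(&virq_to_evtchn[1], 42); /* bind */
        send_virq(1);                         /* delivers via port 42 */
        close_port(1, 42);                    /* invalidates the binding */
        send_virq(1);                         /* now a no-op */
        return 0;
    }

Multiple senders may hold the read lock concurrently, which is the
point of the conversion: writers remain fully serialized, and the write
lock's acquisition subsumes the barrier the old code needed.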
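The comment added to evtchn_bind_virq() relies on the event_lock unlock
providing the ordering an explicit smp_wmb() otherwise would. A rough
user-space illustration of that publication ordering, with C11
release/acquire standing in for Xen's barriers (all names hypothetical):

    /* Model of the publish ordering in evtchn_bind_virq(): channel
     * state must become visible before the port does. */
    #include <stdatomic.h>

    struct evtchn_model {
        int state;                     /* set up before publication */
    };

    static struct evtchn_model chn;
    static atomic_uint published_port;

    static void bind_virq_model(unsigned int port)
    {
        chn.state = 1;                 /* channel setup (under event_lock) */

        /*
         * Release ordering makes the setup above visible to any reader
         * that observes the new port -- the role played by unlocking
         * event_lock in the real code.
         */
        atomic_store_explicit(&published_port, port, memory_order_release);
    }

    static int send_model(void)
    {
        unsigned int port =
            atomic_load_explicit(&published_port, memory_order_acquire);

        return port ? chn.state : 0;   /* a nonzero port implies state == 1 */
    }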