@@ -129,6 +129,31 @@ static always_inline u16 observe_head(spinlock_tickets_t *t)
return read_atomic(&t->head);
}
+int _spin_lock_cond(spinlock_t *lock, bool_t (*cond)(void *), void *data) /* as _spin_lock(), but while waiting re-evaluate cond(data); returns 1 with the lock held, 0 if cond became false and the request was dropped */
+{
+ spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
+ LOCK_PROFILE_VAR;
+
+ check_lock(&lock->debug);
+ tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail,
+ tickets.head_tail); /* atomically claim a ticket: .tail of the returned snapshot is ours */
+ while ( tickets.tail != observe_head(&lock->tickets) ) /* spin until our ticket reaches the head */
+ {
+ LOCK_PROFILE_BLOCK;
+ arch_lock_relax();
+ if ( cond && !cond(data) ) /* NULL cond degenerates to plain _spin_lock() behaviour */
+ {
+ add_sized(&lock->tickets.head, 1); /* NOTE(review): advancing head while our ticket is NOT at the head appears to admit the next waiter while the current owner still holds the lock -- confirm this cannot race */
+ arch_lock_signal();
+ return 0; /* lock not taken */
+ }
+ }
+ LOCK_PROFILE_GOT;
+ preempt_disable();
+ arch_lock_acquire_barrier();
+ return 1; /* lock taken; caller releases with spin_unlock() */
+}
+
void _spin_lock(spinlock_t *lock)
{
spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
@@ -153,6 +153,7 @@ typedef struct spinlock {
#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
void _spin_lock(spinlock_t *lock);
+int _spin_lock_cond(spinlock_t *lock, bool_t (*cond)(void *), void *data);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);
@@ -169,6 +170,8 @@ void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);
#define spin_lock(l) _spin_lock(l)
+#define spin_lock_cond(l, c, d) _spin_lock_cond(l, c, d)
+#define spin_lock_kick(l) arch_lock_signal() /* wake CPUs spinning in _spin_lock_cond() so they re-check their condition */
#define spin_lock_irq(l) _spin_lock_irq(l)
#define spin_lock_irqsave(l, f) \
({ \
Because _spin_trylock() does not take a lock ticket, it may take a long time until the lock is acquired. Add _spin_lock_cond(), which waits for the lock while periodically checking a condition; when the condition becomes false, the lock request is dropped. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> --- xen/common/spinlock.c | 25 +++++++++++++++++++++++++ xen/include/xen/spinlock.h | 3 +++ 2 files changed, 28 insertions(+), 0 deletions(-)