
[v2,2/2] xen/spinlocks: fix placement of preempt_[dis|en]able()

Message ID 20200313143755.31870-3-jgross@suse.com (mailing list archive)
State New, archived
Series xen/locks: fix preempt disabling in lock handling

Commit Message

Jürgen Groß March 13, 2020, 2:37 p.m. UTC
In case Xen ever gains preemption support, the spinlock code's
preempt_disable() and preempt_enable() calls should be placed outside
of the locked section.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- move preempt_enable() to the very end of _spin_unlock() (Jan Beulich)
---
 xen/common/spinlock.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
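
[Editorial note, not part of the patch or of Xen: a minimal, self-contained
C sketch of the ordering the patch establishes. It uses C11 atomics and
hypothetical names (sketch_lock_t, sketch_lock, sketch_trylock,
sketch_unlock) plus a stub preempt_count in place of Xen's arch primitives
and preempt helpers; it is an illustration of the placement, not Xen's
implementation.]

    /* Illustration only: simplified ticket lock with the patched
     * preempt_disable()/preempt_enable() placement. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for Xen's preempt_disable()/preempt_enable(). */
    static _Thread_local unsigned int preempt_count;
    static void preempt_disable(void) { preempt_count++; }
    static void preempt_enable(void)  { preempt_count--; }

    /* Simplified ticket lock: "tail" hands out tickets, "head" is the
     * ticket currently being served. */
    typedef struct {
        _Atomic uint32_t head;
        _Atomic uint32_t tail;
    } sketch_lock_t;

    static void sketch_lock(sketch_lock_t *lock)
    {
        /* Preemption goes off before the ticket is taken (patched order). */
        preempt_disable();
        uint32_t ticket = atomic_fetch_add(&lock->tail, 1);
        while ( atomic_load(&lock->head) != ticket )
            ;   /* spin with preemption already disabled */
    }

    static bool sketch_trylock(sketch_lock_t *lock)
    {
        uint32_t head = atomic_load(&lock->head);
        uint32_t tail = atomic_load(&lock->tail);

        if ( head != tail )
            return false;           /* lock looks taken, give up early */

        preempt_disable();
        if ( !atomic_compare_exchange_strong(&lock->tail, &tail, tail + 1) )
        {
            /* Lost the race: undo preempt_disable(), as the patch does. */
            preempt_enable();
            return false;
        }
        return true;                /* we own ticket "tail", head == tail */
    }

    static void sketch_unlock(sketch_lock_t *lock)
    {
        /* Release the lock first, re-enable preemption last. */
        atomic_fetch_add(&lock->head, 1);
        preempt_enable();
    }

The point mirrored from the patch is that the preemption-disabled window
exactly brackets lock ownership: it opens before the operation that can make
this CPU the owner and closes only after ownership has been released, or was
never gained at all.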

Comments

Jan Beulich March 16, 2020, 10 a.m. UTC | #1
On 13.03.2020 15:37, Juergen Gross wrote:
> In case Xen ever gains preemption support, the spinlock code's
> preempt_disable() and preempt_enable() calls should be placed outside
> of the locked section.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

Patch

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 344981c54a..6c8b62beb0 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -160,6 +160,7 @@  void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
     LOCK_PROFILE_VAR;
 
     check_lock(&lock->debug);
+    preempt_disable();
     tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail,
                                            tickets.head_tail);
     while ( tickets.tail != observe_head(&lock->tickets) )
@@ -171,7 +172,6 @@  void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
     }
     got_lock(&lock->debug);
     LOCK_PROFILE_GOT;
-    preempt_disable();
     arch_lock_acquire_barrier();
 }
 
@@ -199,11 +199,11 @@  unsigned long _spin_lock_irqsave(spinlock_t *lock)
 void _spin_unlock(spinlock_t *lock)
 {
     arch_lock_release_barrier();
-    preempt_enable();
     LOCK_PROFILE_REL;
     rel_lock(&lock->debug);
     add_sized(&lock->tickets.head, 1);
     arch_lock_signal();
+    preempt_enable();
 }
 
 void _spin_unlock_irq(spinlock_t *lock)
@@ -242,15 +242,18 @@  int _spin_trylock(spinlock_t *lock)
         return 0;
     new = old;
     new.tail++;
+    preempt_disable();
     if ( cmpxchg(&lock->tickets.head_tail,
                  old.head_tail, new.head_tail) != old.head_tail )
+    {
+        preempt_enable();
         return 0;
+    }
     got_lock(&lock->debug);
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
     if (lock->profile)
         lock->profile->time_locked = NOW();
 #endif
-    preempt_disable();
     /*
      * cmpxchg() is a full barrier so no need for an
      * arch_lock_acquire_barrier().