
[1/2] xen/spinlock: use lock address for lock debug functions

Message ID 20220224105436.1480-2-jgross@suse.com
State New, archived
Series xen/spinlock: cleanup struct spinlock

Commit Message

Jürgen Groß Feb. 24, 2022, 10:54 a.m. UTC
Instead of only passing the lock_debug address to check_lock() et al.,
use the address of the spinlock.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/spinlock.c      | 34 +++++++++++++++++-----------------
 xen/include/xen/rwlock.h   | 10 +++++-----
 xen/include/xen/spinlock.h | 10 ++++++++--
 3 files changed, 30 insertions(+), 24 deletions(-)

Comments

Jan Beulich Feb. 24, 2022, 4:12 p.m. UTC | #1
On 24.02.2022 11:54, Juergen Gross wrote:
> Instead of only passing the lock_debug address to check_lock() et al
> use the address of the spinlock.

I'm uncertain about this full exposure. The next patch looks to again
only use the new "data" subfield in these debugging helpers.

Jan
Jürgen Groß Feb. 25, 2022, 8:47 a.m. UTC | #2
On 24.02.22 17:12, Jan Beulich wrote:
> On 24.02.2022 11:54, Juergen Gross wrote:
>> Instead of only passing the lock_debug address to check_lock() et al
>> use the address of the spinlock.
> 
> I'm uncertain about this full exposure. The next patch looks to again
> only use the new "data" subfield in these debugging helpers.

Nevertheless, I don't think that this detail should be exported,
especially as check_lock() is being used for rwlocks, too.


Juergen
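
To make the interface change concrete, here is a minimal stand-alone
sketch (not Xen code: the union, the spinlock layout and the helper
bodies are reduced to placeholders, and the _old/_new suffixes exist
only for this illustration) of how call sites move from passing the
embedded debug member to passing the whole lock:

/*
 * Simplified illustration only -- not Xen code.  Types and helper
 * bodies are cut down to show just the calling-convention change.
 */
#include <stdbool.h>
#include <stdio.h>

union lock_debug {
    unsigned int val;
    struct {
        unsigned int cpu:12;
        bool irq_safe:1;
        bool unseen:1;
    };
};

typedef struct spinlock {
    unsigned int tickets;
    union lock_debug debug;
} spinlock_t;

/* Old interface: every caller names the embedded debug member. */
static void check_lock_old(union lock_debug *debug, bool try)
{
    printf("old: irq_safe=%d try=%d\n", debug->irq_safe, try);
}

/* New interface: callers pass the lock itself; only the helper
 * dereferences lock->debug. */
static void check_lock_new(spinlock_t *lock, bool try)
{
    printf("new: irq_safe=%d try=%d\n", lock->debug.irq_safe, try);
}

int main(void)
{
    spinlock_t lock = { 0 };

    lock.debug.irq_safe = true;
    check_lock_old(&lock.debug, false);   /* before this patch */
    check_lock_new(&lock, false);         /* after this patch  */
    return 0;
}

With the new signature the debug union no longer appears in the
helpers' interface at all, which is what lets the rwlock.h hunks below
simply hand over &lock->lock.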

Patch

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 62c83aaa6a..53d6ab6853 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -13,7 +13,7 @@ 
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
 
-void check_lock(union lock_debug *debug, bool try)
+void check_lock(spinlock_t *lock, bool try)
 {
     bool irq_safe = !local_irq_is_enabled();
 
@@ -49,12 +49,12 @@  void check_lock(union lock_debug *debug, bool try)
     if ( try && irq_safe )
         return;
 
-    if ( unlikely(debug->irq_safe != irq_safe) )
+    if ( unlikely(lock->debug.irq_safe != irq_safe) )
     {
         union lock_debug seen, new = { 0 };
 
         new.irq_safe = irq_safe;
-        seen.val = cmpxchg(&debug->val, LOCK_DEBUG_INITVAL, new.val);
+        seen.val = cmpxchg(&lock->debug.val, LOCK_DEBUG_INITVAL, new.val);
 
         if ( !seen.unseen && seen.irq_safe == !irq_safe )
         {
@@ -65,7 +65,7 @@  void check_lock(union lock_debug *debug, bool try)
     }
 }
 
-static void check_barrier(union lock_debug *debug)
+static void check_barrier(spinlock_t *lock)
 {
     if ( unlikely(atomic_read(&spin_debug) <= 0) )
         return;
@@ -81,19 +81,19 @@  static void check_barrier(union lock_debug *debug)
      * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
      * is clearly wrong, for the same reason outlined in check_lock() above.
      */
-    BUG_ON(!local_irq_is_enabled() && !debug->irq_safe);
+    BUG_ON(!local_irq_is_enabled() && !lock->debug.irq_safe);
 }
 
-static void got_lock(union lock_debug *debug)
+static void got_lock(spinlock_t *lock)
 {
-    debug->cpu = smp_processor_id();
+    lock->debug.cpu = smp_processor_id();
 }
 
-static void rel_lock(union lock_debug *debug)
+static void rel_lock(spinlock_t *lock)
 {
     if ( atomic_read(&spin_debug) > 0 )
-        BUG_ON(debug->cpu != smp_processor_id());
-    debug->cpu = SPINLOCK_NO_CPU;
+        BUG_ON(lock->debug.cpu != smp_processor_id());
+    lock->debug.cpu = SPINLOCK_NO_CPU;
 }
 
 void spin_debug_enable(void)
@@ -164,7 +164,7 @@  void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
     spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
     LOCK_PROFILE_VAR;
 
-    check_lock(&lock->debug, false);
+    check_lock(lock, false);
     preempt_disable();
     tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail,
                                            tickets.head_tail);
@@ -176,7 +176,7 @@  void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
         arch_lock_relax();
     }
     arch_lock_acquire_barrier();
-    got_lock(&lock->debug);
+    got_lock(lock);
     LOCK_PROFILE_GOT;
 }
 
@@ -204,7 +204,7 @@  unsigned long _spin_lock_irqsave(spinlock_t *lock)
 void _spin_unlock(spinlock_t *lock)
 {
     LOCK_PROFILE_REL;
-    rel_lock(&lock->debug);
+    rel_lock(lock);
     arch_lock_release_barrier();
     add_sized(&lock->tickets.head, 1);
     arch_lock_signal();
@@ -240,7 +240,7 @@  int _spin_trylock(spinlock_t *lock)
     spinlock_tickets_t old, new;
 
     preempt_disable();
-    check_lock(&lock->debug, true);
+    check_lock(lock, true);
     old = observe_lock(&lock->tickets);
     if ( old.head != old.tail )
     {
@@ -259,7 +259,7 @@  int _spin_trylock(spinlock_t *lock)
      * cmpxchg() is a full barrier so no need for an
      * arch_lock_acquire_barrier().
      */
-    got_lock(&lock->debug);
+    got_lock(lock);
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
     if (lock->profile)
         lock->profile->time_locked = NOW();
@@ -274,7 +274,7 @@  void _spin_barrier(spinlock_t *lock)
     s_time_t block = NOW();
 #endif
 
-    check_barrier(&lock->debug);
+    check_barrier(lock);
     smp_mb();
     sample = observe_lock(&lock->tickets);
     if ( sample.head != sample.tail )
@@ -300,7 +300,7 @@  int _spin_trylock_recursive(spinlock_t *lock)
     BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
     BUILD_BUG_ON(SPINLOCK_RECURSE_BITS < 3);
 
-    check_lock(&lock->debug, true);
+    check_lock(lock, true);
 
     if ( likely(lock->recurse_cpu != cpu) )
     {
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 0cc9167715..188fc809dc 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -56,7 +56,7 @@  static inline int _read_trylock(rwlock_t *lock)
     u32 cnts;
 
     preempt_disable();
-    check_lock(&lock->lock.debug, true);
+    check_lock(&lock->lock, true);
     cnts = atomic_read(&lock->cnts);
     if ( likely(_can_read_lock(cnts)) )
     {
@@ -90,7 +90,7 @@  static inline void _read_lock(rwlock_t *lock)
     if ( likely(_can_read_lock(cnts)) )
     {
         /* The slow path calls check_lock() via spin_lock(). */
-        check_lock(&lock->lock.debug, false);
+        check_lock(&lock->lock, false);
         return;
     }
 
@@ -169,7 +169,7 @@  static inline void _write_lock(rwlock_t *lock)
     if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
     {
         /* The slow path calls check_lock() via spin_lock(). */
-        check_lock(&lock->lock.debug, false);
+        check_lock(&lock->lock, false);
         return;
     }
 
@@ -206,7 +206,7 @@  static inline int _write_trylock(rwlock_t *lock)
     u32 cnts;
 
     preempt_disable();
-    check_lock(&lock->lock.debug, true);
+    check_lock(&lock->lock, true);
     cnts = atomic_read(&lock->cnts);
     if ( unlikely(cnts) ||
          unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
@@ -341,7 +341,7 @@  static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
     else
     {
         /* All other paths have implicit check_lock() calls via read_lock(). */
-        check_lock(&percpu_rwlock->rwlock.lock.debug, false);
+        check_lock(&percpu_rwlock->rwlock.lock, false);
     }
 }
 
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 961891bea4..5b6b73732f 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -21,13 +21,11 @@  union lock_debug {
     };
 };
 #define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
-void check_lock(union lock_debug *debug, bool try);
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
 union lock_debug { };
 #define _LOCK_DEBUG { }
-#define check_lock(l, t) ((void)0)
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
 #endif
@@ -189,6 +187,14 @@  int _spin_trylock_recursive(spinlock_t *lock);
 void _spin_lock_recursive(spinlock_t *lock);
 void _spin_unlock_recursive(spinlock_t *lock);
 
+#ifdef CONFIG_DEBUG_LOCKS
+void check_lock(spinlock_t *lock, bool try);
+#else
+static inline void check_lock(spinlock_t *lock, bool try)
+{
+}
+#endif
+
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
 #define spin_lock_irq(l)              _spin_lock_irq(l)
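
For reference, here is a stand-alone sketch of the declaration pattern
this last hunk switches to (simplified spinlock_t; demo_trylock() and
the check_lock() bodies are invented for the example): a real prototype
when CONFIG_DEBUG_LOCKS is enabled, an empty static inline otherwise,
so call sites look identical in either configuration:

/*
 * Simplified illustration only -- not the actual Xen headers.
 * CONFIG_DEBUG_LOCKS stands in for the Kconfig option.
 */
#include <stdbool.h>

typedef struct spinlock {
    unsigned int tickets;
} spinlock_t;

#ifdef CONFIG_DEBUG_LOCKS
/* Debug builds: the real implementation lives out of line (spinlock.c). */
void check_lock(spinlock_t *lock, bool try);
#else
/* Non-debug builds: typed no-op stub, so callers need no #ifdefs and
 * the call compiles away, but the argument is still type-checked. */
static inline void check_lock(spinlock_t *lock, bool try)
{
}
#endif

static inline int demo_trylock(spinlock_t *lock)
{
    check_lock(lock, true);   /* identical call site in both configs */
    return 1;
}

#ifdef CONFIG_DEBUG_LOCKS
/* Stand-in for the real out-of-line implementation, so this sketch
 * also links when built with -DCONFIG_DEBUG_LOCKS. */
void check_lock(spinlock_t *lock, bool try)
{
    (void)lock;
    (void)try;
}
#endif

int main(void)
{
    spinlock_t lock = { 0 };

    return demo_trylock(&lock) ? 0 : 1;
}

Compared with the previous "#define check_lock(l, t) ((void)0)" stub,
the static inline keeps the lock argument type-checked even in
non-debug builds, while the compiler still drops the no-op call
entirely.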