[v3,3/3] xen/rwlock: add check_lock() handling to rwlocks

Message ID 20201104081549.3712-4-jgross@suse.com
State New, archived
Series xen/locking: fix and enhance lock debugging

Commit Message

Jürgen Groß Nov. 4, 2020, 8:15 a.m. UTC
Checking whether a lock is used consistently with respect to interrupts
being on or off is beneficial for rwlocks, too.

So add check_lock() calls to rwlock functions. For this purpose make
check_lock() globally accessible.
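
The idea behind check_lock(), in a simplified sketch (assuming Xen's
local_irq_is_enabled() and BUG(); 'struct lock_dbg' and check_lock_sketch()
are hypothetical stand-ins for the real union lock_debug based
implementation, which latches the state with a cmpxchg): the first
acquisition of a lock records whether interrupts were disabled, and any
later acquisition in the opposite state is reported as a bug.

struct lock_dbg {
    bool seen;      /* lock has been acquired at least once */
    bool irq_safe;  /* that acquisition had IRQs disabled */
};

static void check_lock_sketch(struct lock_dbg *dbg, bool try)
{
    bool irq_safe = !local_irq_is_enabled();

    /* A trylock with IRQs disabled cannot block, so nothing to check. */
    if ( try && irq_safe )
        return;

    if ( !dbg->seen )
    {
        /* First acquisition: latch the IRQ state. */
        dbg->seen = true;
        dbg->irq_safe = irq_safe;
    }
    else if ( dbg->irq_safe != irq_safe )
        BUG();  /* lock used both with and without IRQs disabled */
}

In the hunks below the rwlock fast paths call check_lock() directly, while
the slow paths inherit the check from the spin_lock() they take.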

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- call check_lock() unconditionally in try_lock variants (Jan Beulich)

V3:
- add comments (Jan Beulich)
- place check_lock() calls inside preempt-off region (Jan Beulich)

---
 xen/common/spinlock.c      |  3 +--
 xen/include/xen/rwlock.h   | 15 +++++++++++++++
 xen/include/xen/spinlock.h |  2 ++
 3 files changed, 18 insertions(+), 2 deletions(-)

Comments

Julien Grall Nov. 4, 2020, 9:41 a.m. UTC | #1
Hi Juergen,

On 04/11/2020 08:15, Juergen Gross wrote:
> Checking whether a lock is used consistently with respect to interrupts
> being on or off is beneficial for rwlocks, too.
> 
> So add check_lock() calls to rwlock functions. For this purpose make
> check_lock() globally accessible.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Julien Grall <jgrall@amazon.com>

Cheers,
Jan Beulich Nov. 4, 2020, 9:42 a.m. UTC | #2
On 04.11.2020 09:15, Juergen Gross wrote:
> Checking whether a lock is used consistently with respect to interrupts
> being on or off is beneficial for rwlocks, too.
> 
> So add check_lock() calls to rwlock functions. For this purpose make
> check_lock() globally accessible.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

Patch

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index f4eb50f030..b90981bb27 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -13,7 +13,7 @@ 
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
 
-static void check_lock(union lock_debug *debug, bool try)
+void check_lock(union lock_debug *debug, bool try)
 {
     bool irq_safe = !local_irq_is_enabled();
 
@@ -108,7 +108,6 @@  void spin_debug_disable(void)
 
 #else /* CONFIG_DEBUG_LOCKS */
 
-#define check_lock(l, t) ((void)0)
 #define check_barrier(l) ((void)0)
 #define got_lock(l) ((void)0)
 #define rel_lock(l) ((void)0)
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 427664037a..0cc9167715 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -56,6 +56,7 @@  static inline int _read_trylock(rwlock_t *lock)
     u32 cnts;
 
     preempt_disable();
+    check_lock(&lock->lock.debug, true);
     cnts = atomic_read(&lock->cnts);
     if ( likely(_can_read_lock(cnts)) )
     {
@@ -87,7 +88,11 @@  static inline void _read_lock(rwlock_t *lock)
      * arch_lock_acquire_barrier().
      */
     if ( likely(_can_read_lock(cnts)) )
+    {
+        /* The slow path calls check_lock() via spin_lock(). */
+        check_lock(&lock->lock.debug, false);
         return;
+    }
 
     /* The slowpath will decrement the reader count, if necessary. */
     queue_read_lock_slowpath(lock);
@@ -162,7 +167,11 @@  static inline void _write_lock(rwlock_t *lock)
      * arch_lock_acquire_barrier().
      */
     if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
+    {
+        /* The slow path calls check_lock() via spin_lock(). */
+        check_lock(&lock->lock.debug, false);
         return;
+    }
 
     queue_write_lock_slowpath(lock);
     /*
@@ -197,6 +206,7 @@  static inline int _write_trylock(rwlock_t *lock)
     u32 cnts;
 
     preempt_disable();
+    check_lock(&lock->lock.debug, true);
     cnts = atomic_read(&lock->cnts);
     if ( unlikely(cnts) ||
          unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
@@ -328,6 +338,11 @@  static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
         /* Drop the read lock because we don't need it anymore. */
         read_unlock(&percpu_rwlock->rwlock);
     }
+    else
+    {
+        /* All other paths have implicit check_lock() calls via read_lock(). */
+        check_lock(&percpu_rwlock->rwlock.lock.debug, false);
+    }
 }
 
 static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index ca13b600a0..9fa4e600c1 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -21,11 +21,13 @@  union lock_debug {
     };
 };
 #define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
+void check_lock(union lock_debug *debug, bool try);
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
 union lock_debug { };
 #define _LOCK_DEBUG { }
+#define check_lock(l, t) ((void)0)
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
 #endif
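
As a hypothetical illustration (the lock and function names below are made
up for this example), with CONFIG_DEBUG_LOCKS enabled the added calls make
rwlocks subject to the same interrupt-consistency check as spinlocks:
acquiring the same rwlock once with interrupts enabled and once with them
disabled trips check_lock().

static DEFINE_RWLOCK(example_lock);

void path_a(void)
{
    read_lock(&example_lock);       /* first use: IRQs enabled */
    /* ... */
    read_unlock(&example_lock);
}

void path_b(void)
{
    unsigned long flags;

    local_irq_save(flags);
    read_lock(&example_lock);       /* same lock with IRQs disabled:
                                     * check_lock() detects the mismatch */
    /* ... */
    read_unlock(&example_lock);
    local_irq_restore(flags);
}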