
[2/2] xen/rwlock: add check_lock() handling to rwlocks

Message ID 20201030142500.5464-3-jgross@suse.com (mailing list archive)
State Superseded
Series xen/locking: fix and enhance lock debugging

Commit Message

Jürgen Groß Oct. 30, 2020, 2:25 p.m. UTC
Checking that a lock is used consistently with respect to interrupts
being enabled or disabled is beneficial for rwlocks, too.

So add check_lock() calls to rwlock functions. For this purpose make
check_lock() globally accessible.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/spinlock.c      |  3 +--
 xen/include/xen/rwlock.h   | 14 ++++++++++++++
 xen/include/xen/spinlock.h |  2 ++
 3 files changed, 17 insertions(+), 2 deletions(-)
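
For readers not familiar with Xen's lock debugging, the following is a
minimal conceptual sketch of the consistency check involved. It is
deliberately simplified and is not the code from xen/common/spinlock.c:
the lock_debug_sketch_t type, the local_irq_is_enabled_stub() helper and
the assert() stand in for Xen's union lock_debug, local_irq_is_enabled()
and the real reporting, and the handling of the try parameter is an
assumption here (a failing trylock with interrupts off is taken to be
harmless); the exact rules live in check_lock() itself.

#include <assert.h>
#include <stdbool.h>

typedef struct {
    int irq_safe;             /* -1: not seen yet, 0: IRQs on, 1: IRQs off */
} lock_debug_sketch_t;

static bool local_irq_is_enabled_stub(void)
{
    return true;              /* placeholder for the arch-specific query */
}

void check_lock_sketch(lock_debug_sketch_t *debug, bool try)
{
    bool irq_safe = !local_irq_is_enabled_stub();

    /* In this sketch a trylock with interrupts off gets a pass: it cannot
     * spin, so it cannot deadlock against an interrupt handler. */
    if ( try && irq_safe )
        return;

    if ( debug->irq_safe < 0 )
        debug->irq_safe = irq_safe;     /* first acquisition: record the state */
    else
        assert(debug->irq_safe == (int)irq_safe);   /* mixed usage: complain */
}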

Comments

Jan Beulich Oct. 30, 2020, 3:10 p.m. UTC | #1
On 30.10.2020 15:25, Juergen Gross wrote:
> --- a/xen/include/xen/rwlock.h
> +++ b/xen/include/xen/rwlock.h
> @@ -65,7 +65,11 @@ static inline int _read_trylock(rwlock_t *lock)
>           * arch_lock_acquire_barrier().
>           */
>          if ( likely(_can_read_lock(cnts)) )
> +        {
> +            check_lock(&lock->lock.debug, true);
>              return 1;
> +        }

Why not unconditionally earlier in the function?

> @@ -87,7 +91,10 @@ static inline void _read_lock(rwlock_t *lock)
>       * arch_lock_acquire_barrier().
>       */
>      if ( likely(_can_read_lock(cnts)) )
> +    {
> +        check_lock(&lock->lock.debug, false);
>          return;
> +    }
>  
>      /* The slowpath will decrement the reader count, if necessary. */
>      queue_read_lock_slowpath(lock);

I guess doing so here and ...

> @@ -162,7 +169,10 @@ static inline void _write_lock(rwlock_t *lock)
>       * arch_lock_acquire_barrier().
>       */
>      if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
> +    {
> +        check_lock(&lock->lock.debug, false);
>          return;
> +    }
>  
>      queue_write_lock_slowpath(lock);

... here is okay, as the slow paths have checks anyway.

> @@ -205,6 +215,8 @@ static inline int _write_trylock(rwlock_t *lock)
>          return 0;
>      }
>  
> +    check_lock(&lock->lock.debug, true);

But here I again think it wants moving up.

Jan
Jürgen Groß Oct. 30, 2020, 3:13 p.m. UTC | #2
On 30.10.20 16:10, Jan Beulich wrote:
> On 30.10.2020 15:25, Juergen Gross wrote:
>> --- a/xen/include/xen/rwlock.h
>> +++ b/xen/include/xen/rwlock.h
>> @@ -65,7 +65,11 @@ static inline int _read_trylock(rwlock_t *lock)
>>            * arch_lock_acquire_barrier().
>>            */
>>           if ( likely(_can_read_lock(cnts)) )
>> +        {
>> +            check_lock(&lock->lock.debug, true);
>>               return 1;
>> +        }
> 
> Why not unconditionally earlier in the function?

It's a trylock, so we don't want to call check_lock() without having
got the lock.

> 
>> @@ -87,7 +91,10 @@ static inline void _read_lock(rwlock_t *lock)
>>        * arch_lock_acquire_barrier().
>>        */
>>       if ( likely(_can_read_lock(cnts)) )
>> +    {
>> +        check_lock(&lock->lock.debug, false);
>>           return;
>> +    }
>>   
>>       /* The slowpath will decrement the reader count, if necessary. */
>>       queue_read_lock_slowpath(lock);
> 
> I guess doing so here and ...
> 
>> @@ -162,7 +169,10 @@ static inline void _write_lock(rwlock_t *lock)
>>        * arch_lock_acquire_barrier().
>>        */
>>       if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
>> +    {
>> +        check_lock(&lock->lock.debug, false);
>>           return;
>> +    }
>>   
>>       queue_write_lock_slowpath(lock);
> 
> ... here is okay, as the slow paths have checks anyway.
> 
>> @@ -205,6 +215,8 @@ static inline int _write_trylock(rwlock_t *lock)
>>           return 0;
>>       }
>>   
>> +    check_lock(&lock->lock.debug, true);
> 
> But here I again think it wants moving up.

No, another trylock.


Juergen
Jürgen Groß Oct. 30, 2020, 3:20 p.m. UTC | #3
On 30.10.20 16:13, Jürgen Groß wrote:
> On 30.10.20 16:10, Jan Beulich wrote:
>> On 30.10.2020 15:25, Juergen Gross wrote:
>>> --- a/xen/include/xen/rwlock.h
>>> +++ b/xen/include/xen/rwlock.h
>>> @@ -65,7 +65,11 @@ static inline int _read_trylock(rwlock_t *lock)
>>>            * arch_lock_acquire_barrier().
>>>            */
>>>           if ( likely(_can_read_lock(cnts)) )
>>> +        {
>>> +            check_lock(&lock->lock.debug, true);
>>>               return 1;
>>> +        }
>>
>> Why not unconditionally earlier in the function?
> 
> It's a trylock, so we don't want to call check_lock() without having
> got the lock.

Hmm, OTOH we do so for spinlocks, too.

So maybe it's really better to move it up.


Juergen
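
To make the placement question above concrete, here is a toy contrast of
the two options, reusing the check_lock_sketch() idea from the sketch
further up rather than Xen's real queued-rwlock internals; toy_lock_t,
the function names and the single-flag lock body are made up purely for
illustration.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    int irq_safe;                        /* same layout as in the earlier sketch */
} lock_debug_sketch_t;

void check_lock_sketch(lock_debug_sketch_t *debug, bool try);  /* see earlier sketch */

typedef struct {
    atomic_flag taken;                   /* stand-in for the queued rwlock state */
    lock_debug_sketch_t debug;
} toy_lock_t;

#define TOY_LOCK_INIT { ATOMIC_FLAG_INIT, { -1 } }   /* irq_safe = -1: not seen yet */

/* Placement as posted: the check runs only once the lock is actually held. */
static bool toy_trylock_check_on_success(toy_lock_t *l)
{
    if ( atomic_flag_test_and_set(&l->taken) )
        return false;                    /* contended: return without any check */
    check_lock_sketch(&l->debug, true);
    return true;
}

/* Alternative raised in the review: check unconditionally on entry, as the
 * spinlock trylock path already does, so failed attempts are validated too. */
static bool toy_trylock_check_up_front(toy_lock_t *l)
{
    check_lock_sketch(&l->debug, true);
    return !atomic_flag_test_and_set(&l->taken);
}

The second variant reports inconsistent interrupt usage even on attempts
that lose the race, at the cost of checking a lock that was never acquired.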

Patch

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 54f0c55dc2..acb3f86339 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -13,7 +13,7 @@ 
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
 
-static void check_lock(union lock_debug *debug, bool try)
+void check_lock(union lock_debug *debug, bool try)
 {
     bool irq_safe = !local_irq_is_enabled();
 
@@ -108,7 +108,6 @@  void spin_debug_disable(void)
 
 #else /* CONFIG_DEBUG_LOCKS */
 
-#define check_lock(l, t) ((void)0)
 #define check_barrier(l) ((void)0)
 #define got_lock(l) ((void)0)
 #define rel_lock(l) ((void)0)
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 427664037a..c302644705 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -65,7 +65,11 @@  static inline int _read_trylock(rwlock_t *lock)
          * arch_lock_acquire_barrier().
          */
         if ( likely(_can_read_lock(cnts)) )
+        {
+            check_lock(&lock->lock.debug, true);
             return 1;
+        }
+
         atomic_sub(_QR_BIAS, &lock->cnts);
     }
     preempt_enable();
@@ -87,7 +91,10 @@  static inline void _read_lock(rwlock_t *lock)
      * arch_lock_acquire_barrier().
      */
     if ( likely(_can_read_lock(cnts)) )
+    {
+        check_lock(&lock->lock.debug, false);
         return;
+    }
 
     /* The slowpath will decrement the reader count, if necessary. */
     queue_read_lock_slowpath(lock);
@@ -162,7 +169,10 @@  static inline void _write_lock(rwlock_t *lock)
      * arch_lock_acquire_barrier().
      */
     if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
+    {
+        check_lock(&lock->lock.debug, false);
         return;
+    }
 
     queue_write_lock_slowpath(lock);
     /*
@@ -205,6 +215,8 @@  static inline int _write_trylock(rwlock_t *lock)
         return 0;
     }
 
+    check_lock(&lock->lock.debug, true);
+
     /*
      * atomic_cmpxchg() is a full barrier so no need for an
      * arch_lock_acquire_barrier().
@@ -328,6 +340,8 @@  static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
         /* Drop the read lock because we don't need it anymore. */
         read_unlock(&percpu_rwlock->rwlock);
     }
+    else
+        check_lock(&percpu_rwlock->rwlock.lock.debug, false);
 }
 
 static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index ca13b600a0..9fa4e600c1 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -21,11 +21,13 @@  union lock_debug {
     };
 };
 #define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
+void check_lock(union lock_debug *debug, bool try);
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
 union lock_debug { };
 #define _LOCK_DEBUG { }
+#define check_lock(l, t) ((void)0)
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
 #endif
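
As a closing illustration (not part of the patch): the kind of mixed usage
the new checks are meant to catch once CONFIG_DEBUG_LOCKS is enabled. The
lock and the two call paths are hypothetical, assuming the usual
DEFINE_RWLOCK()/read_lock_irqsave() wrappers from xen/rwlock.h.

#include <xen/rwlock.h>

static DEFINE_RWLOCK(example_lock);      /* hypothetical lock, illustration only */

/* One path takes the lock with interrupts enabled ... */
static void example_reader_a(void)
{
    read_lock(&example_lock);
    /* ... read some shared state ... */
    read_unlock(&example_lock);
}

/* ... while another path takes the same lock with interrupts disabled.
 * With CONFIG_DEBUG_LOCKS the check_lock() calls added above would flag
 * this inconsistency. */
static void example_reader_b(void)
{
    unsigned long flags;

    read_lock_irqsave(&example_lock, flags);
    /* ... read some shared state ... */
    read_unlock_irqrestore(&example_lock, flags);
}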