
[v5,12/13] xen/rwlock: raise the number of possible cpus

Message ID 20240314072029.16937-13-jgross@suse.com (mailing list archive)
State Superseded
Series xen/spinlock: make recursive spinlocks a dedicated type

Commit Message

Jürgen Groß March 14, 2024, 7:20 a.m. UTC
Today the rwlock handling limits the number of cpus to 4095. The main
reason is the use of the atomic_t data type for the main lock handling,
which needs 2 bits for the locking state (writer waiting or write
locked), 12 bits for the id of a possible writer, and a 12-bit counter
for readers. The limit isn't 4096 due to an off-by-one sanity check.

The atomic_t data type is 32 bits wide, so in theory 15 bits for the
writer's cpu id and 15 bits for the reader count would seem fine, but
via read_trylock() more readers than cpus are possible: read_trylock()
can be used recursively, so a single cpu can contribute several reader
increments.

This means that it is possible to raise the number of cpus to 16384
without changing the rwlock_t data structure. In order to avoid the
reader count wrapping to zero, don't let read_trylock() succeed in case
the highest bit of the reader count is already set. This leaves enough
headroom for non-recursive readers to enter without risking a wrap.

While at it, calculate _QW_CPUMASK and _QR_SHIFT from _QW_SHIFT and add
a sanity check against overflowing the atomic_t data type.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V5:
- new patch
---
 xen/include/xen/rwlock.h | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
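
For reference, the 32-bit lock word layouts implied by the old and new
constants (a sketch derived from the patch below; not part of the
original posting):

    /*
     * Old layout (up to 4095 cpus):       New layout (up to 16384 cpus):
     *
     *  bits 14..31: reader count           bits 16..31: reader count
     *  bits 12..13: writer state           bits 14..15: writer state
     *  bits  0..11: writer cpu id          bits  0..13: writer cpu id
     *
     * In the new layout read_trylock() additionally keeps bit 31 clear
     * (the cnts <= INT_MAX check) as headroom against the reader count
     * wrapping to zero.
     */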

Comments

Jan Beulich March 18, 2024, 3:39 p.m. UTC | #1
On 14.03.2024 08:20, Juergen Gross wrote:
> Today the rwlock handling limits the number of cpus to 4095. The main
> reason is the use of the atomic_t data type for the main lock handling,
> which needs 2 bits for the locking state (writer waiting or write
> locked), 12 bits for the id of a possible writer, and a 12-bit counter
> for readers. The limit isn't 4096 due to an off-by-one sanity check.
> 
> The atomic_t data type is 32 bits wide, so in theory 15 bits for the
> writer's cpu id and 15 bits for the reader count would seem fine, but
> via read_trylock() more readers than cpus are possible: read_trylock()
> can be used recursively, so a single cpu can contribute several reader
> increments.

As a result, afaict you choose to use just 14 bits for the CPU, but
still 15 bits (with a 16th to deal with overflow) for the reader count.
That could do with being made explicit here, as one open question is
whether a single extra bit of headroom for the reader count is deemed
sufficient.
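
To make the bit budget concrete, a minimal standalone sketch (using C11
_Static_assert in place of Xen's BUILD_BUG_ON; the constants mirror the
patch below):

    #include <limits.h>

    #define _QW_SHIFT    14                      /* writer cpu id: bits 0..13 */
    #define _QW_CPUMASK  ((1U << _QW_SHIFT) - 1U)
    #define _QR_SHIFT    (_QW_SHIFT + 2)         /* writer state: bits 14..15 */
    #define _QR_BIAS     (1U << _QR_SHIFT)       /* reader count: bits 16..31 */

    _Static_assert(_QW_CPUMASK + 1U == 16384U, "14 bits give 16384 cpu ids");
    _Static_assert(INT_MAX / _QR_BIAS == 32767,
                   "32767 reader increments fit before bit 31 is set");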

> --- a/xen/include/xen/rwlock.h
> +++ b/xen/include/xen/rwlock.h
> @@ -23,12 +23,12 @@ typedef struct {
>  #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
>  
>  /* Writer states & reader shift and bias. */
> -#define    _QW_CPUMASK  0xfffU             /* Writer CPU mask */
> -#define    _QW_SHIFT    12                 /* Writer flags shift */
> -#define    _QW_WAITING  (1U << _QW_SHIFT)  /* A writer is waiting */
> -#define    _QW_LOCKED   (3U << _QW_SHIFT)  /* A writer holds the lock */
> -#define    _QW_WMASK    (3U << _QW_SHIFT)  /* Writer mask */
> -#define    _QR_SHIFT    14                 /* Reader count shift */
> +#define    _QW_SHIFT    14                      /* Writer flags shift */
> +#define    _QW_CPUMASK  ((1U << _QW_SHIFT) - 1) /* Writer CPU mask */
> +#define    _QW_WAITING  (1U << _QW_SHIFT)       /* A writer is waiting */
> +#define    _QW_LOCKED   (3U << _QW_SHIFT)       /* A writer holds the lock */
> +#define    _QW_WMASK    (3U << _QW_SHIFT)       /* Writer mask */
> +#define    _QR_SHIFT    (_QW_SHIFT + 2)         /* Reader count shift */
>  #define    _QR_BIAS     (1U << _QR_SHIFT)

Btw, seeing all the uppercase U suffixes here, I think you had some
lowercase ones earlier in the series. While MISRA doesn't demand
uppercase for U, it does for L, and iirc we decided to use all-uppercase
suffixes as a result. Would be nice if what goes in could have this
correct right away.

> @@ -36,14 +36,16 @@ void queue_write_lock_slowpath(rwlock_t *lock);
>  
>  static inline bool _is_write_locked_by_me(unsigned int cnts)
>  {
> -    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
> +    BUILD_BUG_ON((_QW_CPUMASK + 1) < NR_CPUS);
> +    BUILD_BUG_ON(NR_CPUS * _QR_BIAS > INT_MAX);
>      return (cnts & _QW_WMASK) == _QW_LOCKED &&
>             (cnts & _QW_CPUMASK) == smp_processor_id();
>  }
>  
>  static inline bool _can_read_lock(unsigned int cnts)
>  {
> -    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
> +    return cnts <= INT_MAX &&
> +           (!(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts));
>  }

I view this as problematic: Code knowing that a write lock is being held
may invoke a function using read_trylock() and expect the lock to be
available there.

Jan
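
For context, the trylock fast path both sides refer to looks roughly
like this (a paraphrased sketch of xen/include/xen/rwlock.h, not
verbatim; preemption handling omitted):

    static inline int _read_trylock(rwlock_t *lock)
    {
        unsigned int cnts = atomic_read(&lock->cnts);

        if ( likely(_can_read_lock(cnts)) )
        {
            /* Each (possibly nested) trylock adds _QR_BIAS to the count. */
            cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
            if ( likely(_can_read_lock(cnts)) )
                return 1;
            /* Raced with a writer (or hit the overflow guard): back out. */
            atomic_sub(_QR_BIAS, &lock->cnts);
        }
        return 0;
    }

A cpu already holding the write lock passes the _is_write_locked_by_me()
check, which is why nested read_trylock() calls keep incrementing the
reader count while the lock is write locked.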
Jürgen Groß March 18, 2024, 4 p.m. UTC | #2
On 18.03.24 16:39, Jan Beulich wrote:
> On 14.03.2024 08:20, Juergen Gross wrote:
>> Today the rwlock handling limits the number of cpus to 4095. The main
>> reason is the use of the atomic_t data type for the main lock handling,
>> which needs 2 bits for the locking state (writer waiting or write
>> locked), 12 bits for the id of a possible writer, and a 12-bit counter
>> for readers. The limit isn't 4096 due to an off-by-one sanity check.
>>
>> The atomic_t data type is 32 bits wide, so in theory 15 bits for the
>> writer's cpu id and 15 bits for the reader count would seem fine, but
>> via read_trylock() more readers than cpus are possible: read_trylock()
>> can be used recursively, so a single cpu can contribute several reader
>> increments.
> 
> As a result, afaict you choose to use just 14 bits for the CPU, but
> still 15 bits (with a 16th to deal with overflow) for the reader count.
> That could do with being made explicit here, as one open question is
> whether a single extra bit of headroom for the reader count is deemed
> sufficient.

Okay, I'll add a sentence to the commit message.

> 
>> --- a/xen/include/xen/rwlock.h
>> +++ b/xen/include/xen/rwlock.h
>> @@ -23,12 +23,12 @@ typedef struct {
>>   #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
>>   
>>   /* Writer states & reader shift and bias. */
>> -#define    _QW_CPUMASK  0xfffU             /* Writer CPU mask */
>> -#define    _QW_SHIFT    12                 /* Writer flags shift */
>> -#define    _QW_WAITING  (1U << _QW_SHIFT)  /* A writer is waiting */
>> -#define    _QW_LOCKED   (3U << _QW_SHIFT)  /* A writer holds the lock */
>> -#define    _QW_WMASK    (3U << _QW_SHIFT)  /* Writer mask */
>> -#define    _QR_SHIFT    14                 /* Reader count shift */
>> +#define    _QW_SHIFT    14                      /* Writer flags shift */
>> +#define    _QW_CPUMASK  ((1U << _QW_SHIFT) - 1) /* Writer CPU mask */
>> +#define    _QW_WAITING  (1U << _QW_SHIFT)       /* A writer is waiting */
>> +#define    _QW_LOCKED   (3U << _QW_SHIFT)       /* A writer holds the lock */
>> +#define    _QW_WMASK    (3U << _QW_SHIFT)       /* Writer mask */
>> +#define    _QR_SHIFT    (_QW_SHIFT + 2)         /* Reader count shift */
>>   #define    _QR_BIAS     (1U << _QR_SHIFT)
> 
> Btw, seeing all the uppercase U suffixes here, I think you had some
> lowercase ones earlier in the series. While MISRA doesn't demand
> uppercase for U, it does for L, and iirc we decided to use all-uppercase
> suffixes as a result. Would be nice if what goes in could have this
> correct right away.

I'll rescan all the patches and change them accordingly.

> 
>> @@ -36,14 +36,16 @@ void queue_write_lock_slowpath(rwlock_t *lock);
>>   
>>   static inline bool _is_write_locked_by_me(unsigned int cnts)
>>   {
>> -    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
>> +    BUILD_BUG_ON((_QW_CPUMASK + 1) < NR_CPUS);
>> +    BUILD_BUG_ON(NR_CPUS * _QR_BIAS > INT_MAX);
>>       return (cnts & _QW_WMASK) == _QW_LOCKED &&
>>              (cnts & _QW_CPUMASK) == smp_processor_id();
>>   }
>>   
>>   static inline bool _can_read_lock(unsigned int cnts)
>>   {
>> -    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
>> +    return cnts <= INT_MAX &&
>> +           (!(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts));
>>   }
> 
> I view this as problematic: Code knowing that a write lock is being held
> may invoke a function using read_trylock() and expect the lock to be
> available there.

So you expect it to be fine that someone is using read_trylock() 32768 times
recursively while holding a lock as a writer? Sure, I can change the condition,
but OTOH ...


Juergen
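
Where the 32768 figure comes from (editor's arithmetic; strictly, the
32768th nested attempt is the first to be refused):

    /*
     * Each nested read_trylock() adds _QR_BIAS = 1 << 16 to the lock word.
     * 32768 * (1 << 16) == 0x80000000 > INT_MAX, so once 32767 nested
     * read locks are held, the cnts <= INT_MAX guard refuses the next one.
     */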
Jan Beulich March 18, 2024, 4:05 p.m. UTC | #3
On 18.03.2024 17:00, Jürgen Groß wrote:
> On 18.03.24 16:39, Jan Beulich wrote:
>> On 14.03.2024 08:20, Juergen Gross wrote:
>>> @@ -36,14 +36,16 @@ void queue_write_lock_slowpath(rwlock_t *lock);
>>>   
>>>   static inline bool _is_write_locked_by_me(unsigned int cnts)
>>>   {
>>> -    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
>>> +    BUILD_BUG_ON((_QW_CPUMASK + 1) < NR_CPUS);
>>> +    BUILD_BUG_ON(NR_CPUS * _QR_BIAS > INT_MAX);
>>>       return (cnts & _QW_WMASK) == _QW_LOCKED &&
>>>              (cnts & _QW_CPUMASK) == smp_processor_id();
>>>   }
>>>   
>>>   static inline bool _can_read_lock(unsigned int cnts)
>>>   {
>>> -    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
>>> +    return cnts <= INT_MAX &&
>>> +           (!(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts));
>>>   }
>>
>> I view this as problematic: Code knowing that a write lock is being held
>> may invoke a function using read_trylock() and expect the lock to be
>> available there.
> 
> So you expect it to be fine that someone is using read_trylock() 32768 times
> recursively while holding a lock as a writer? Sure, I can change the condition,
> but OTOH ...

Hmm, yes, the reader count (leaving aside nested read_trylock()) is zero
when the lock is held for writing. So yes, I agree the condition is fine,
but may I ask for a brief comment to this effect, for blind people like
me?

Jan
Jürgen Groß March 18, 2024, 4:06 p.m. UTC | #4
On 18.03.24 17:05, Jan Beulich wrote:
> On 18.03.2024 17:00, Jürgen Groß wrote:
>> On 18.03.24 16:39, Jan Beulich wrote:
>>> On 14.03.2024 08:20, Juergen Gross wrote:
>>>> @@ -36,14 +36,16 @@ void queue_write_lock_slowpath(rwlock_t *lock);
>>>>    
>>>>    static inline bool _is_write_locked_by_me(unsigned int cnts)
>>>>    {
>>>> -    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
>>>> +    BUILD_BUG_ON((_QW_CPUMASK + 1) < NR_CPUS);
>>>> +    BUILD_BUG_ON(NR_CPUS * _QR_BIAS > INT_MAX);
>>>>        return (cnts & _QW_WMASK) == _QW_LOCKED &&
>>>>               (cnts & _QW_CPUMASK) == smp_processor_id();
>>>>    }
>>>>    
>>>>    static inline bool _can_read_lock(unsigned int cnts)
>>>>    {
>>>> -    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
>>>> +    return cnts <= INT_MAX &&
>>>> +           (!(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts));
>>>>    }
>>>
>>> I view this as problematic: Code knowing that a write lock is being held
>>> may invoke a function using read_trylock() and expect the lock to be
>>> available there.
>>
>> So you expect it to be fine that someone is using read_trylock() 32768 times
>> recursively while holding a lock as a writer? Sure, I can change the condition,
>> but OTOH ...
> 
> Hmm, yes, the reader count (leaving aside nested read_trylock()) is zero
> when the lock is held for writing. So yes, I agree the condition is fine,
> but may I ask for a brief comment to this effect, for blind people like
> me?

Yeah, fine with me. :-)


Juergen
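
One possible shape for the requested comment (an illustrative sketch
only; the wording that finally went in may differ):

    static inline bool _can_read_lock(unsigned int cnts)
    {
        /*
         * A writer holding the lock sees a zero reader count, so for it
         * the cnts <= INT_MAX guard can only fail after roughly 2^15
         * nested read_trylock() calls.  Refusing further readers once
         * bit 31 is set keeps the reader count from wrapping to zero.
         */
        return cnts <= INT_MAX &&
               (!(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts));
    }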

Patch

diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 65d88b0ef4..afd57659bd 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -23,12 +23,12 @@  typedef struct {
 #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
 
 /* Writer states & reader shift and bias. */
-#define    _QW_CPUMASK  0xfffU             /* Writer CPU mask */
-#define    _QW_SHIFT    12                 /* Writer flags shift */
-#define    _QW_WAITING  (1U << _QW_SHIFT)  /* A writer is waiting */
-#define    _QW_LOCKED   (3U << _QW_SHIFT)  /* A writer holds the lock */
-#define    _QW_WMASK    (3U << _QW_SHIFT)  /* Writer mask */
-#define    _QR_SHIFT    14                 /* Reader count shift */
+#define    _QW_SHIFT    14                      /* Writer flags shift */
+#define    _QW_CPUMASK  ((1U << _QW_SHIFT) - 1) /* Writer CPU mask */
+#define    _QW_WAITING  (1U << _QW_SHIFT)       /* A writer is waiting */
+#define    _QW_LOCKED   (3U << _QW_SHIFT)       /* A writer holds the lock */
+#define    _QW_WMASK    (3U << _QW_SHIFT)       /* Writer mask */
+#define    _QR_SHIFT    (_QW_SHIFT + 2)         /* Reader count shift */
 #define    _QR_BIAS     (1U << _QR_SHIFT)
 
 void queue_read_lock_slowpath(rwlock_t *lock);
@@ -36,14 +36,16 @@  void queue_write_lock_slowpath(rwlock_t *lock);
 
 static inline bool _is_write_locked_by_me(unsigned int cnts)
 {
-    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
+    BUILD_BUG_ON((_QW_CPUMASK + 1) < NR_CPUS);
+    BUILD_BUG_ON(NR_CPUS * _QR_BIAS > INT_MAX);
     return (cnts & _QW_WMASK) == _QW_LOCKED &&
            (cnts & _QW_CPUMASK) == smp_processor_id();
 }
 
 static inline bool _can_read_lock(unsigned int cnts)
 {
-    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
+    return cnts <= INT_MAX &&
+           (!(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts));
 }
 
 /*