
[2/2] xen/spinlock: merge recurse_cpu and debug.cpu fields in struct spinlock

Message ID 20220224105436.1480-3-jgross@suse.com (mailing list archive)
State New, archived
Series xen/spinlock: cleanup struct spinlock

Commit Message

Juergen Gross Feb. 24, 2022, 10:54 a.m. UTC
Instead of having two fields in struct spinlock holding a cpu number,
just merge them. For this purpose, get rid of union lock_debug and use a
32-bit wide union for cpu, recurse_cnt and the two debug booleans.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/mm/mm-locks.h |  6 ++---
 xen/common/spinlock.c      | 48 +++++++++++++++++++++-----------------
 xen/include/xen/spinlock.h | 43 ++++++++++++++++++----------------
 3 files changed, 52 insertions(+), 45 deletions(-)
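
For orientation, the merged field packs everything into a single 32-bit word. Below is a freestanding sketch of the resulting layout, mirroring the xen/include/xen/spinlock.h hunk further down and assuming the LSB-first bit-field allocation which SPINLOCK_DEBUG_INITVAL (0xc0000000) relies on; the type name in the sketch is made up, the patch calls it spinlock_data_t.

/*
 * Freestanding sketch of the merged 32-bit word, mirroring the
 * xen/include/xen/spinlock.h hunk below (not verbatim from the patch).
 */
#include <stdint.h>

typedef union {
    uint32_t val;
    struct {
        uint32_t cpu:12;         /* SPINLOCK_CPU_BITS, bits 0..11 */
        uint32_t recurse_cnt:4;  /* SPINLOCK_RECURSE_BITS, bits 12..15 */
        uint32_t pad:14;         /* SPINLOCK_PAD_BITS, bits 16..29 */
        uint32_t irq_safe:1;     /* debug builds only, bit 30 */
        uint32_t unseen:1;       /* debug builds only, bit 31 */
    };
} spinlock_data_sketch_t;

_Static_assert(sizeof(spinlock_data_sketch_t) == sizeof(uint32_t),
               "merged data must stay a single 32-bit word");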

Comments

Jan Beulich Feb. 24, 2022, 4:11 p.m. UTC | #1
On 24.02.2022 11:54, Juergen Gross wrote:
> --- a/xen/arch/x86/mm/mm-locks.h
> +++ b/xen/arch/x86/mm/mm-locks.h
> @@ -42,7 +42,7 @@ static inline void mm_lock_init(mm_lock_t *l)
>  
>  static inline bool mm_locked_by_me(const mm_lock_t *l)
>  {
> -    return (l->lock.recurse_cpu == current->processor);
> +    return (l->lock.data.cpu == current->processor);
>  }

I see a fair risk with this: Behavior will now differ between debug and
non-debug builds. E.g. a livelock because of trying to acquire the same
lock again would not be noticed in a debug build if the acquire is
conditional upon this function's return value. I think this is the main
reason behind having two separate fields, despite the apparent redundancy.
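
One concrete way the divergence can show up, modelled as a freestanding toy program (not Xen code; the CPU numbers are made up): for a plain, never recursively taken lock held by another CPU, a non-debug build still compares the tickets, while a debug build compares the CPU recorded by got_lock().

/*
 * Toy model of the post-patch condition in _spin_is_locked(), for a plain
 * lock held by CPU 3 and queried from CPU 5.  In a non-debug build data.cpu
 * stays SPINLOCK_NO_CPU, so the tickets are compared; in a debug build
 * got_lock() has set data.cpu = 3.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPINLOCK_NO_CPU 0xfff

static bool is_locked(uint32_t data_cpu, bool tickets_differ, uint32_t this_cpu)
{
    return data_cpu == SPINLOCK_NO_CPU ? tickets_differ
                                       : data_cpu == this_cpu;
}

int main(void)
{
    printf("non-debug build: %d\n", is_locked(SPINLOCK_NO_CPU, true, 5)); /* 1 */
    printf("debug build:     %d\n", is_locked(3, true, 5));               /* 0 */
    return 0;
}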

Nevertheless a few more comments in case I'm missing something.

> @@ -81,19 +79,19 @@ static void check_barrier(spinlock_t *lock)
>       * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
>       * is clearly wrong, for the same reason outlined in check_lock() above.
>       */
> -    BUG_ON(!local_irq_is_enabled() && !lock->debug.irq_safe);
> +    BUG_ON(!local_irq_is_enabled() && !lock->data.irq_safe);
>  }
>  
>  static void got_lock(spinlock_t *lock)
>  {
> -    lock->debug.cpu = smp_processor_id();
> +    lock->data.cpu = smp_processor_id();

This assignment breaks ...

> @@ -230,9 +228,9 @@ int _spin_is_locked(spinlock_t *lock)
>       * "false" here, making this function suitable only for use in
>       * ASSERT()s and alike.
>       */
> -    return lock->recurse_cpu == SPINLOCK_NO_CPU
> +    return lock->data.cpu == SPINLOCK_NO_CPU

... the use of this field to distinguish recursively locked locks
from "simple" ones. At the very least the comment needs updating,
but ...

>             ? lock->tickets.head != lock->tickets.tail

> ... to what extent this is still a sensible check in debug builds is
also questionable. The effect here certainly also deserves pointing
out in the description.

> -           : lock->recurse_cpu == smp_processor_id();
> +           : lock->data.cpu == smp_processor_id();
>  }
>  
>  int _spin_trylock(spinlock_t *lock)
> @@ -296,22 +294,24 @@ int _spin_trylock_recursive(spinlock_t *lock)
>  {
>      unsigned int cpu = smp_processor_id();
>  
> -    /* Don't allow overflow of recurse_cpu field. */
> +    /* Don't allow overflow of cpu field. */
>      BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
>      BUILD_BUG_ON(SPINLOCK_RECURSE_BITS < 3);
>  
>      check_lock(lock, true);
>  
> -    if ( likely(lock->recurse_cpu != cpu) )
> +    if ( likely(lock->data.cpu != cpu) )
>      {
>          if ( !spin_trylock(lock) )
>              return 0;
> -        lock->recurse_cpu = cpu;
> +#ifndef CONFIG_DEBUG_LOCKS
> +        lock->data.cpu = cpu;
> +#endif

Maybe worth an ASSERT() in the #else case (and elsewhere as applicable)?
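
A hypothetical shape of that suggestion, for illustration only (not part of the patch): in debug builds got_lock() has already recorded the CPU, so the recursive path could verify the field instead of re-assigning it.

    if ( likely(lock->data.cpu != cpu) )
    {
        if ( !spin_trylock(lock) )
            return 0;
#ifndef CONFIG_DEBUG_LOCKS
        lock->data.cpu = cpu;
#else
        ASSERT(lock->data.cpu == cpu);  /* hypothetical: set by got_lock() */
#endif
    }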

> --- a/xen/include/xen/spinlock.h
> +++ b/xen/include/xen/spinlock.h
> @@ -6,26 +6,34 @@
>  #include <asm/spinlock.h>
>  #include <asm/types.h>
>  
> -#define SPINLOCK_CPU_BITS  12
> +#define SPINLOCK_CPU_BITS      12
> +#define SPINLOCK_NO_CPU        ((1u << SPINLOCK_CPU_BITS) - 1)
> +#define SPINLOCK_RECURSE_BITS  (16 - SPINLOCK_CPU_BITS)
> +#define SPINLOCK_MAX_RECURSE   ((1u << SPINLOCK_RECURSE_BITS) - 1)
> +#define SPINLOCK_PAD_BITS      (30 - SPINLOCK_CPU_BITS - SPINLOCK_RECURSE_BITS)
>  
> -#ifdef CONFIG_DEBUG_LOCKS
> -union lock_debug {
> -    uint16_t val;
> -#define LOCK_DEBUG_INITVAL 0xffff
> +typedef union {
> +    u32 val;
>      struct {
> -        uint16_t cpu:SPINLOCK_CPU_BITS;
> -#define LOCK_DEBUG_PAD_BITS (14 - SPINLOCK_CPU_BITS)
> -        uint16_t :LOCK_DEBUG_PAD_BITS;
> +        u32 cpu:SPINLOCK_CPU_BITS;
> +        u32 recurse_cnt:SPINLOCK_RECURSE_BITS;
> +        u32 pad:SPINLOCK_PAD_BITS;
> +#ifdef CONFIG_DEBUG_LOCKS
>          bool irq_safe:1;
>          bool unseen:1;
> +#define SPINLOCK_DEBUG_INITVAL 0xc0000000
> +#else
> +        u32 debug_pad:2;

Prior to your change we had two well-formed uint16_t. You replace them by
five new instances of the being-phased-out u32?

Also - do the two padding fields really need names?
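
(For reference, the pre-patch union already used an unnamed bit-field for its padding, i.e. uint16_t :LOCK_DEBUG_PAD_BITS; the same works here. An illustrative sketch, not taken from the patch:)

        uint32_t cpu:SPINLOCK_CPU_BITS;
        uint32_t recurse_cnt:SPINLOCK_RECURSE_BITS;
        uint32_t :SPINLOCK_PAD_BITS;          /* unnamed padding */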

Jan
Juergen Gross Feb. 25, 2022, 8:36 a.m. UTC | #2
On 24.02.22 17:11, Jan Beulich wrote:
> On 24.02.2022 11:54, Juergen Gross wrote:
>> --- a/xen/arch/x86/mm/mm-locks.h
>> +++ b/xen/arch/x86/mm/mm-locks.h
>> @@ -42,7 +42,7 @@ static inline void mm_lock_init(mm_lock_t *l)
>>   
>>   static inline bool mm_locked_by_me(const mm_lock_t *l)
>>   {
>> -    return (l->lock.recurse_cpu == current->processor);
>> +    return (l->lock.data.cpu == current->processor);
>>   }
> 
> I see a fair risk with this: Behavior will now differ between debug and
> non-debug builds. E.g. a livelock because of trying to acquire the same
> lock again would not be noticed in a debug build if the acquire is
> conditional upon this function's return value. I think this is the main
> reason behind having two separate fields, despite the apparent redundancy.

You are aware that mm_locked_by_me() is used for recursive spinlocks
only?

> 
> Nevertheless a few more comments in case I'm missing something.
> 
>> @@ -81,19 +79,19 @@ static void check_barrier(spinlock_t *lock)
>>        * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
>>        * is clearly wrong, for the same reason outlined in check_lock() above.
>>        */
>> -    BUG_ON(!local_irq_is_enabled() && !lock->debug.irq_safe);
>> +    BUG_ON(!local_irq_is_enabled() && !lock->data.irq_safe);
>>   }
>>   
>>   static void got_lock(spinlock_t *lock)
>>   {
>> -    lock->debug.cpu = smp_processor_id();
>> +    lock->data.cpu = smp_processor_id();
> 
> This assignment breaks ...
> 
>> @@ -230,9 +228,9 @@ int _spin_is_locked(spinlock_t *lock)
>>        * "false" here, making this function suitable only for use in
>>        * ASSERT()s and alike.
>>        */
>> -    return lock->recurse_cpu == SPINLOCK_NO_CPU
>> +    return lock->data.cpu == SPINLOCK_NO_CPU
> 
> ... the use of this field to distinguish recursively locked locks
> from "simple" ones. At the very least the comment needs updating,
> but ...
> 
>>              ? lock->tickets.head != lock->tickets.tail
> 
>> ... to what extent this is still a sensible check in debug builds is
> also questionable. The effect here certainly also deserves pointing
> out in the description.

Okay, will add something.

> 
>> -           : lock->recurse_cpu == smp_processor_id();
>> +           : lock->data.cpu == smp_processor_id();
>>   }
>>   
>>   int _spin_trylock(spinlock_t *lock)
>> @@ -296,22 +294,24 @@ int _spin_trylock_recursive(spinlock_t *lock)
>>   {
>>       unsigned int cpu = smp_processor_id();
>>   
>> -    /* Don't allow overflow of recurse_cpu field. */
>> +    /* Don't allow overflow of cpu field. */
>>       BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
>>       BUILD_BUG_ON(SPINLOCK_RECURSE_BITS < 3);
>>   
>>       check_lock(lock, true);
>>   
>> -    if ( likely(lock->recurse_cpu != cpu) )
>> +    if ( likely(lock->data.cpu != cpu) )
>>       {
>>           if ( !spin_trylock(lock) )
>>               return 0;
>> -        lock->recurse_cpu = cpu;
>> +#ifndef CONFIG_DEBUG_LOCKS
>> +        lock->data.cpu = cpu;
>> +#endif
> 
> Maybe worth an ASSERT() in the #else case (and elsewhere as applicable)?

Okay.

> 
>> --- a/xen/include/xen/spinlock.h
>> +++ b/xen/include/xen/spinlock.h
>> @@ -6,26 +6,34 @@
>>   #include <asm/spinlock.h>
>>   #include <asm/types.h>
>>   
>> -#define SPINLOCK_CPU_BITS  12
>> +#define SPINLOCK_CPU_BITS      12
>> +#define SPINLOCK_NO_CPU        ((1u << SPINLOCK_CPU_BITS) - 1)
>> +#define SPINLOCK_RECURSE_BITS  (16 - SPINLOCK_CPU_BITS)
>> +#define SPINLOCK_MAX_RECURSE   ((1u << SPINLOCK_RECURSE_BITS) - 1)
>> +#define SPINLOCK_PAD_BITS      (30 - SPINLOCK_CPU_BITS - SPINLOCK_RECURSE_BITS)
>>   
>> -#ifdef CONFIG_DEBUG_LOCKS
>> -union lock_debug {
>> -    uint16_t val;
>> -#define LOCK_DEBUG_INITVAL 0xffff
>> +typedef union {
>> +    u32 val;
>>       struct {
>> -        uint16_t cpu:SPINLOCK_CPU_BITS;
>> -#define LOCK_DEBUG_PAD_BITS (14 - SPINLOCK_CPU_BITS)
>> -        uint16_t :LOCK_DEBUG_PAD_BITS;
>> +        u32 cpu:SPINLOCK_CPU_BITS;
>> +        u32 recurse_cnt:SPINLOCK_RECURSE_BITS;
>> +        u32 pad:SPINLOCK_PAD_BITS;
>> +#ifdef CONFIG_DEBUG_LOCKS
>>           bool irq_safe:1;
>>           bool unseen:1;
>> +#define SPINLOCK_DEBUG_INITVAL 0xc0000000
>> +#else
>> +        u32 debug_pad:2;
> 
> Prior to your change we had two well-formed uint16_t. You replace them by
> five new instances of the being-phased-out u32?

Oh, sorry. Will change to uint32_t.

> Also - do the two padding fields really need names?

I'm fine to drop them.


Juergen
Juergen Gross Feb. 25, 2022, 8:55 a.m. UTC | #3
On 25.02.22 09:36, Juergen Gross wrote:
> On 24.02.22 17:11, Jan Beulich wrote:
>> On 24.02.2022 11:54, Juergen Gross wrote:
>>> --- a/xen/arch/x86/mm/mm-locks.h
>>> +++ b/xen/arch/x86/mm/mm-locks.h
>>> @@ -42,7 +42,7 @@ static inline void mm_lock_init(mm_lock_t *l)
>>>   static inline bool mm_locked_by_me(const mm_lock_t *l)
>>>   {
>>> -    return (l->lock.recurse_cpu == current->processor);
>>> +    return (l->lock.data.cpu == current->processor);
>>>   }
>>
>> I see a fair risk with this: Behavior will now differ between debug and
>> non-debug builds. E.g. a livelock because of trying to acquire the same
>> lock again would not be noticed in a debug build if the acquire is
>> conditional upon this function's return value. I think this is the main
>> reason behind having two separate fields, despite the apparent redundancy.
> 
> You are aware that mm_locked_by_me() is used for recursive spinlocks
> only?

BTW, it might make sense to add another bool for the debug case to mark
recursive lock usage. I don't think anything good can come from using a
lock in both modes (recursive and non-recursive).
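
A possible shape of that idea, purely as an illustration (SPINLOCK_PAD_BITS would have to shrink by one bit, and as the follow-ups note the flag could not simply be sticky):

#ifdef CONFIG_DEBUG_LOCKS
        bool irq_safe:1;
        bool unseen:1;
        bool recursive:1;   /* hypothetical: maintained by the *_recursive() paths */
#endif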


Juergen
Jan Beulich Feb. 25, 2022, 9:13 a.m. UTC | #4
On 25.02.2022 09:36, Juergen Gross wrote:
> On 24.02.22 17:11, Jan Beulich wrote:
>> On 24.02.2022 11:54, Juergen Gross wrote:
>>> --- a/xen/arch/x86/mm/mm-locks.h
>>> +++ b/xen/arch/x86/mm/mm-locks.h
>>> @@ -42,7 +42,7 @@ static inline void mm_lock_init(mm_lock_t *l)
>>>   
>>>   static inline bool mm_locked_by_me(const mm_lock_t *l)
>>>   {
>>> -    return (l->lock.recurse_cpu == current->processor);
>>> +    return (l->lock.data.cpu == current->processor);
>>>   }
>>
>> I see a fair risk with this: Behavior will now differ between debug and
>> non-debug builds. E.g. a livelock because of trying to acquire the same
>> lock again would not be noticed in a debug build if the acquire is
>> conditional upon this function's return value. I think this is the main
>> reason behind having two separate fields, despite the apparent redundancy.
> 
> You are aware that mm_locked_by_me() is used for recursive spinlocks
> only?

I will admit that it occurred to me only a while after writing the earlier
reply that it's used only with recursive locking, due to _mm_lock() indeed
using spin_lock_recursive() unconditionally (i.e. independent of its "rec"
parameter). Nevertheless I continue to have the vague recollection that
the duplication of the two fields was intentional and deemed necessary.

Jan
Jan Beulich Feb. 25, 2022, 9:24 a.m. UTC | #5
On 25.02.2022 09:55, Juergen Gross wrote:
> On 25.02.22 09:36, Juergen Gross wrote:
>> On 24.02.22 17:11, Jan Beulich wrote:
>>> On 24.02.2022 11:54, Juergen Gross wrote:
>>>> --- a/xen/arch/x86/mm/mm-locks.h
>>>> +++ b/xen/arch/x86/mm/mm-locks.h
>>>> @@ -42,7 +42,7 @@ static inline void mm_lock_init(mm_lock_t *l)
>>>>   static inline bool mm_locked_by_me(const mm_lock_t *l)
>>>>   {
>>>> -    return (l->lock.recurse_cpu == current->processor);
>>>> +    return (l->lock.data.cpu == current->processor);
>>>>   }
>>>
>>> I see a fair risk with this: Behavior will now differ between debug and
>>> non-debug builds. E.g. a livelock because of trying to acquire the same
>>> lock again would not be noticed in a debug build if the acquire is
>>> conditional upon this function's return value. I think this is the main
>>> reason behind having two separate fields, despite the apparent redundancy.
>>
>> You are aware that mm_locked_by_me() is used for recursive spinlocks
>> only?
> 
> BTW, it might make sense to add another bool for the debug case to mark
> recursive lock usage. I don't think anything good can come from using a
> lock in both modes (recursive and non-recursive).

But beware of the coexisting paging_lock() and paging_lock_recursive().
Although I guess your comment was for spinlocks in general, not the
mm-lock machinery. Yet mentioning this reminds me of the page alloc
lock, which different paths acquire in different ways. So the bit
couldn't be a sticky one.

Jan
Juergen Gross Feb. 25, 2022, 9:38 a.m. UTC | #6
On 25.02.22 10:24, Jan Beulich wrote:
> On 25.02.2022 09:55, Juergen Gross wrote:
>> On 25.02.22 09:36, Juergen Gross wrote:
>>> On 24.02.22 17:11, Jan Beulich wrote:
>>>> On 24.02.2022 11:54, Juergen Gross wrote:
>>>>> --- a/xen/arch/x86/mm/mm-locks.h
>>>>> +++ b/xen/arch/x86/mm/mm-locks.h
>>>>> @@ -42,7 +42,7 @@ static inline void mm_lock_init(mm_lock_t *l)
>>>>>    static inline bool mm_locked_by_me(const mm_lock_t *l)
>>>>>    {
>>>>> -    return (l->lock.recurse_cpu == current->processor);
>>>>> +    return (l->lock.data.cpu == current->processor);
>>>>>    }
>>>>
>>>> I see a fair risk with this: Behavior will now differ between debug and
>>>> non-debug builds. E.g. a livelock because of trying to acquire the same
>>>> lock again would not be noticed in a debug build if the acquire is
>>>> conditional upon this function's return value. I think this is the main
>>>> reason behind having two separate fields, despite the apparent redundancy.
>>>
>>> You are aware that mm_locked_by_me() is used for recursive spinlocks
>>> only?
>>
>> BTW, it might make sense to add another bool for the debug case to mark
>> recursive lock usage. I don't think anything good can come from using a
>> lock in both modes (recursive and non-recursive).
> 
> But beware of the coexisting paging_lock() and paging_lock_recursive().
> Albeit I guess your comment was for spinlocks in general, not the
> mm-lock machinery. Yet mentioning this reminds me of the page alloc
> lock, which different paths acquire in different ways. So the bit
> couldn't be a sticky one.

Interesting.

Seems as if e.g. console_lock is used in both ways, too.

Might be a good idea to at least add some self-deadlock detection
support to debug builds.
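
A rough sketch of what such a check could look like (hypothetical, not part of this patch, and ignoring trylock and similar corner cases): with the merged field, the debug code could refuse a blocking acquire of a lock the current CPU already holds.

/* Hypothetical debug-build helper, to be called from the blocking lock path. */
static void check_self_deadlock(const spinlock_t *lock)
{
    if ( lock->data.cpu == smp_processor_id() )
        BUG();  /* blocking acquire of a lock this CPU already holds */
}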


Juergen

Patch

diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index fcfd4706ba..01cf3a820d 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -42,7 +42,7 @@  static inline void mm_lock_init(mm_lock_t *l)
 
 static inline bool mm_locked_by_me(const mm_lock_t *l)
 {
-    return (l->lock.recurse_cpu == current->processor);
+    return (l->lock.data.cpu == current->processor);
 }
 
 static inline int _get_lock_level(void)
@@ -94,7 +94,7 @@  static inline void _mm_lock(const struct domain *d, mm_lock_t *l,
     if ( !((mm_locked_by_me(l)) && rec) )
         _check_lock_level(d, level);
     spin_lock_recursive(&l->lock);
-    if ( l->lock.recurse_cnt == 1 )
+    if ( l->lock.data.recurse_cnt == 1 )
     {
         l->locker_function = func;
         l->unlock_level = _get_lock_level();
@@ -209,7 +209,7 @@  static inline void mm_read_unlock(mm_rwlock_t *l)
 
 static inline void mm_unlock(mm_lock_t *l)
 {
-    if ( l->lock.recurse_cnt == 1 )
+    if ( l->lock.data.recurse_cnt == 1 )
     {
         l->locker_function = "nobody";
         _set_lock_level(l->unlock_level);
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 53d6ab6853..33e6aaab1c 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -17,8 +17,6 @@  void check_lock(spinlock_t *lock, bool try)
 {
     bool irq_safe = !local_irq_is_enabled();
 
-    BUILD_BUG_ON(LOCK_DEBUG_PAD_BITS <= 0);
-
     if ( unlikely(atomic_read(&spin_debug) <= 0) )
         return;
 
@@ -49,12 +47,12 @@  void check_lock(spinlock_t *lock, bool try)
     if ( try && irq_safe )
         return;
 
-    if ( unlikely(lock->debug.irq_safe != irq_safe) )
+    if ( unlikely(lock->data.irq_safe != irq_safe) )
     {
-        union lock_debug seen, new = { 0 };
+        spinlock_data_t seen, new = { 0 };
 
         new.irq_safe = irq_safe;
-        seen.val = cmpxchg(&lock->debug.val, LOCK_DEBUG_INITVAL, new.val);
+        seen.val = cmpxchg(&lock->data.val, SPINLOCK_DATA_INITVAL, new.val);
 
         if ( !seen.unseen && seen.irq_safe == !irq_safe )
         {
@@ -81,19 +79,19 @@  static void check_barrier(spinlock_t *lock)
      * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
      * is clearly wrong, for the same reason outlined in check_lock() above.
      */
-    BUG_ON(!local_irq_is_enabled() && !lock->debug.irq_safe);
+    BUG_ON(!local_irq_is_enabled() && !lock->data.irq_safe);
 }
 
 static void got_lock(spinlock_t *lock)
 {
-    lock->debug.cpu = smp_processor_id();
+    lock->data.cpu = smp_processor_id();
 }
 
 static void rel_lock(spinlock_t *lock)
 {
     if ( atomic_read(&spin_debug) > 0 )
-        BUG_ON(lock->debug.cpu != smp_processor_id());
-    lock->debug.cpu = SPINLOCK_NO_CPU;
+        BUG_ON(lock->data.cpu != smp_processor_id());
+    lock->data.cpu = SPINLOCK_NO_CPU;
 }
 
 void spin_debug_enable(void)
@@ -230,9 +228,9 @@  int _spin_is_locked(spinlock_t *lock)
      * "false" here, making this function suitable only for use in
      * ASSERT()s and alike.
      */
-    return lock->recurse_cpu == SPINLOCK_NO_CPU
+    return lock->data.cpu == SPINLOCK_NO_CPU
            ? lock->tickets.head != lock->tickets.tail
-           : lock->recurse_cpu == smp_processor_id();
+           : lock->data.cpu == smp_processor_id();
 }
 
 int _spin_trylock(spinlock_t *lock)
@@ -296,22 +294,24 @@  int _spin_trylock_recursive(spinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
 
-    /* Don't allow overflow of recurse_cpu field. */
+    /* Don't allow overflow of cpu field. */
     BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
     BUILD_BUG_ON(SPINLOCK_RECURSE_BITS < 3);
 
     check_lock(lock, true);
 
-    if ( likely(lock->recurse_cpu != cpu) )
+    if ( likely(lock->data.cpu != cpu) )
     {
         if ( !spin_trylock(lock) )
             return 0;
-        lock->recurse_cpu = cpu;
+#ifndef CONFIG_DEBUG_LOCKS
+        lock->data.cpu = cpu;
+#endif
     }
 
     /* We support only fairly shallow recursion, else the counter overflows. */
-    ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
-    lock->recurse_cnt++;
+    ASSERT(lock->data.recurse_cnt < SPINLOCK_MAX_RECURSE);
+    lock->data.recurse_cnt++;
 
     return 1;
 }
@@ -320,22 +320,26 @@  void _spin_lock_recursive(spinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
 
-    if ( likely(lock->recurse_cpu != cpu) )
+    if ( likely(lock->data.cpu != cpu) )
     {
         _spin_lock(lock);
-        lock->recurse_cpu = cpu;
+#ifndef CONFIG_DEBUG_LOCKS
+        lock->data.cpu = cpu;
+#endif
     }
 
     /* We support only fairly shallow recursion, else the counter overflows. */
-    ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
-    lock->recurse_cnt++;
+    ASSERT(lock->data.recurse_cnt < SPINLOCK_MAX_RECURSE);
+    lock->data.recurse_cnt++;
 }
 
 void _spin_unlock_recursive(spinlock_t *lock)
 {
-    if ( likely(--lock->recurse_cnt == 0) )
+    if ( likely(--lock->data.recurse_cnt == 0) )
     {
-        lock->recurse_cpu = SPINLOCK_NO_CPU;
+#ifndef CONFIG_DEBUG_LOCKS
+        lock->data.cpu = SPINLOCK_NO_CPU;
+#endif
         spin_unlock(lock);
     }
 }
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 5b6b73732f..61731b5d29 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -6,26 +6,34 @@ 
 #include <asm/spinlock.h>
 #include <asm/types.h>
 
-#define SPINLOCK_CPU_BITS  12
+#define SPINLOCK_CPU_BITS      12
+#define SPINLOCK_NO_CPU        ((1u << SPINLOCK_CPU_BITS) - 1)
+#define SPINLOCK_RECURSE_BITS  (16 - SPINLOCK_CPU_BITS)
+#define SPINLOCK_MAX_RECURSE   ((1u << SPINLOCK_RECURSE_BITS) - 1)
+#define SPINLOCK_PAD_BITS      (30 - SPINLOCK_CPU_BITS - SPINLOCK_RECURSE_BITS)
 
-#ifdef CONFIG_DEBUG_LOCKS
-union lock_debug {
-    uint16_t val;
-#define LOCK_DEBUG_INITVAL 0xffff
+typedef union {
+    u32 val;
     struct {
-        uint16_t cpu:SPINLOCK_CPU_BITS;
-#define LOCK_DEBUG_PAD_BITS (14 - SPINLOCK_CPU_BITS)
-        uint16_t :LOCK_DEBUG_PAD_BITS;
+        u32 cpu:SPINLOCK_CPU_BITS;
+        u32 recurse_cnt:SPINLOCK_RECURSE_BITS;
+        u32 pad:SPINLOCK_PAD_BITS;
+#ifdef CONFIG_DEBUG_LOCKS
         bool irq_safe:1;
         bool unseen:1;
+#define SPINLOCK_DEBUG_INITVAL 0xc0000000
+#else
+        u32 debug_pad:2;
+#define SPINLOCK_DEBUG_INITVAL 0x00000000
+#endif
     };
-};
-#define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
+} spinlock_data_t;
+#define SPINLOCK_DATA_INITVAL (SPINLOCK_NO_CPU | SPINLOCK_DEBUG_INITVAL)
+
+#ifdef CONFIG_DEBUG_LOCKS
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
-union lock_debug { };
-#define _LOCK_DEBUG { }
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
 #endif
@@ -92,7 +100,7 @@  struct lock_profile_qhead {
     static struct lock_profile * const __lock_profile_##name                  \
     __used_section(".lockprofile.data") =                                     \
     &__lock_profile_data_##name
-#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG, x }
+#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, { SPINLOCK_DATA_INITVAL }, x }
 #define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED(NULL)
 #define DEFINE_SPINLOCK(l)                                                    \
     spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                 \
@@ -134,7 +142,7 @@  extern void cf_check spinlock_profile_reset(unsigned char key);
 
 struct lock_profile_qhead { };
 
-#define SPIN_LOCK_UNLOCKED { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG }
+#define SPIN_LOCK_UNLOCKED { { 0 }, { SPINLOCK_DATA_INITVAL } }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
 
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
@@ -156,12 +164,7 @@  typedef union {
 
 typedef struct spinlock {
     spinlock_tickets_t tickets;
-    u16 recurse_cpu:SPINLOCK_CPU_BITS;
-#define SPINLOCK_NO_CPU        ((1u << SPINLOCK_CPU_BITS) - 1)
-#define SPINLOCK_RECURSE_BITS  (16 - SPINLOCK_CPU_BITS)
-    u16 recurse_cnt:SPINLOCK_RECURSE_BITS;
-#define SPINLOCK_MAX_RECURSE   ((1u << SPINLOCK_RECURSE_BITS) - 1)
-    union lock_debug debug;
+    spinlock_data_t data;
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
     struct lock_profile *profile;
 #endif