spinlock: alter inlining of _spin_lock_cb()

Message ID dd75280a-167c-6a1d-87e4-edbffe12cf3c@suse.com (mailing list archive)
State New, archived
Series spinlock: alter inlining of _spin_lock_cb()

Commit Message

Jan Beulich June 14, 2023, 2:17 p.m. UTC
To comply with Misra rule 8.10 ("An inline function shall be declared
with the static storage class"), convert what is presently
_spin_lock_cb() to an always-inline (and static) helper, while making
the function itself a thin wrapper, just like _spin_lock() is.

While there, drop the unlikely() from the callback check, and correct the
indentation in _spin_lock().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
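
For illustration, here is a minimal standalone sketch of the pattern the patch
applies: the shared body becomes a static helper (satisfying rule 8.10's
requirement that inline functions have internal linkage) marked always_inline,
while the functions with external linkage remain thin wrappers around it. All
names below (widget_lock*, the test-and-set flag) are hypothetical stand-ins,
not the Xen code, and always_inline is defined locally only to keep the sketch
self-contained, whereas Xen provides its own macro.

#include <stddef.h>

/* Stand-in for Xen's always_inline macro, so the sketch compiles on its own. */
#define always_inline inline __attribute__((__always_inline__))

/* Common worker: static plus forced inlining, compliant with Misra rule 8.10. */
static void always_inline widget_lock_common(volatile int *lock,
                                             void (*cb)(void *), void *data)
{
    /* Spin on a simple test-and-set flag, invoking the callback while waiting. */
    while ( __sync_lock_test_and_set(lock, 1) )
    {
        if ( cb )
            cb(data);
    }
}

/* Externally visible functions stay thin wrappers, as _spin_lock() already was. */
void widget_lock(volatile int *lock)
{
    widget_lock_common(lock, NULL, NULL);
}

void widget_lock_cb(volatile int *lock, void (*cb)(void *), void *data)
{
    widget_lock_common(lock, cb, data);
}

Because the helper has internal linkage and is forced inline, each wrapper
compiles down to the helper's body, so the external API and the locking
behaviour are unchanged; only linkage and visibility move around.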

Comments

Stefano Stabellini June 14, 2023, 11:41 p.m. UTC | #1
On Wed, 14 Jun 2023, Jan Beulich wrote:
> To comply with Misra rule 8.10 ("An inline function shall be declared
> with the static storage class"), convert what is presently
> _spin_lock_cb() to an always-inline (and static) helper, while making
> the function itself a thin wrapper, just like _spin_lock() is.
> 
> While there, drop the unlikely() from the callback check, and correct the
> indentation in _spin_lock().
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


Patch

--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -304,7 +304,8 @@  static always_inline u16 observe_head(sp
     return read_atomic(&t->head);
 }
 
-void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
+static void always_inline spin_lock_common(spinlock_t *lock,
+                                           void (*cb)(void *), void *data)
 {
     spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
     LOCK_PROFILE_VAR;
@@ -316,7 +317,7 @@  void inline _spin_lock_cb(spinlock_t *lo
     while ( tickets.tail != observe_head(&lock->tickets) )
     {
         LOCK_PROFILE_BLOCK;
-        if ( unlikely(cb) )
+        if ( cb )
             cb(data);
         arch_lock_relax();
     }
@@ -327,7 +328,12 @@  void inline _spin_lock_cb(spinlock_t *lo
 
 void _spin_lock(spinlock_t *lock)
 {
-     _spin_lock_cb(lock, NULL, NULL);
+    spin_lock_common(lock, NULL, NULL);
+}
+
+void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
+{
+    spin_lock_common(lock, cb, data);
 }
 
 void _spin_lock_irq(spinlock_t *lock)