
[V9,04/15] asm-generic: ticket-lock: Keep ticket-lock semantics the same as qspinlock

Message ID 20220808071318.3335746-5-guoren@kernel.org (mailing list archive)
State New, archived
Series arch: Add qspinlock support and atomic cleanup

Commit Message

Guo Ren Aug. 8, 2022, 7:13 a.m. UTC
From: Guo Ren <guoren@linux.alibaba.com>

Define smp_mb__after_spinlock as smp_mb by default, giving all
architectures an RCsc synchronization point there. This keeps
ticket-lock semantics the same as qspinlock's, whose lock operation is
only an acquire (RCpc) synchronization point. For more detail, see
include/linux/spinlock.h.
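
For illustration only (hypothetical variables, mirroring the example
documented in include/linux/spinlock.h), the caller-side idiom this
default serves is:

	WRITE_ONCE(X, 1);
	spin_lock(&s);
	smp_mb__after_spinlock();	/* upgrade the RCpc acquire to RCsc */
	r0 = READ_ONCE(Y);		/* now ordered after the store to X */
	spin_unlock(&s);

Without the barrier, spin_lock()'s plain acquire would allow the store
to X and the later load of Y to be reordered across each other.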

Some architectures can provide stronger semantics than smp_mb here,
e.g. riscv. Others do not need smp_mb__after_spinlock at all, because
their spinlocks already contain an RCsc fence.
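
As a hypothetical override sketch (not part of this series): an
architecture whose arch_spin_lock() already ends in a full fence could
define the hook away in its asm/spinlock.h:

	/* lock acquisition is already RCsc, no upgrade needed */
	#define smp_mb__after_spinlock()	do { } while (0)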

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 include/asm-generic/spinlock.h        |  5 +++++
 include/asm-generic/ticket_spinlock.h | 18 ++++--------------
 2 files changed, 9 insertions(+), 14 deletions(-)

Patch

diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index 970590baf61b..6f5a1b838ca2 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -6,4 +6,9 @@ 
 #include <asm-generic/ticket_spinlock.h>
 #include <asm/qrwlock.h>
 
+/* See include/linux/spinlock.h */
+#ifndef smp_mb__after_spinlock
+#define smp_mb__after_spinlock()	smp_mb()
+#endif
+
 #endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
index cfcff22b37b3..d8e6ec82f096 100644
--- a/include/asm-generic/ticket_spinlock.h
+++ b/include/asm-generic/ticket_spinlock.h
@@ -14,9 +14,8 @@ 
  * a test-and-set.
  *
  * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
- * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
- * a full fence after the spin to upgrade the otherwise-RCpc
- * atomic_cond_read_acquire().
+ * relies on smp_mb__after_spinlock(), which is RCsc, when callers need
+ * an RCsc synchronization point.  See include/linux/spinlock.h.
  *
  * The implementation uses smp_cond_load_acquire() to spin, so if the
  * architecture has WFE like instructions to sleep instead of poll for word
@@ -32,22 +31,13 @@ 
 
 static __always_inline void ticket_spin_lock(arch_spinlock_t *lock)
 {
-	u32 val = atomic_fetch_add(1<<16, &lock->val);
+	u32 val = atomic_fetch_add_acquire(1<<16, &lock->val);
 	u16 ticket = val >> 16;
 
 	if (ticket == (u16)val)
 		return;
 
-	/*
-	 * atomic_cond_read_acquire() is RCpc, but rather than defining a
-	 * custom cond_read_rcsc() here we just emit a full fence.  We only
-	 * need the prior reads before subsequent writes ordering from
-	 * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we
-	 * have no outstanding writes due to the atomic_fetch_add() the extra
-	 * orderings are free.
-	 */
 	atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
-	smp_mb();
 }
 
 static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock)
@@ -57,7 +47,7 @@  static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock)
 	if ((old >> 16) != (old & 0xffff))
 		return false;
 
-	return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */
+	return atomic_try_cmpxchg_acquire(&lock->val, &old, old + (1<<16));
 }
 
 static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock)
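
For readers without the tree at hand, the resulting algorithm can be
sketched as a self-contained user-space C11 translation (illustrative
names; the kernel's unlock is a sub-word store-release rather than the
portable CAS loop used here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* One 32-bit word: next ticket in bits 31..16, current owner in 15..0. */
typedef struct {
	_Atomic uint32_t val;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket; a plain acquire (RCpc) now suffices on the hot path. */
	uint32_t old = atomic_fetch_add_explicit(&lock->val, 1 << 16,
						 memory_order_acquire);
	uint16_t ticket = (uint16_t)(old >> 16);

	if (ticket == (uint16_t)old)
		return;			/* uncontended: the lock is ours */

	/* Spin until the owner half reaches our ticket. */
	while ((uint16_t)atomic_load_explicit(&lock->val,
					      memory_order_acquire) != ticket)
		;
}

static bool ticket_trylock(ticket_lock_t *lock)
{
	uint32_t old = atomic_load_explicit(&lock->val, memory_order_relaxed);

	if ((uint16_t)(old >> 16) != (uint16_t)old)
		return false;		/* held, or waiters are queued */

	/* Acquire on success only; no RCsc upgrade in the lock itself. */
	return atomic_compare_exchange_strong_explicit(&lock->val, &old,
						       old + (1 << 16),
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void ticket_unlock(ticket_lock_t *lock)
{
	uint32_t old = atomic_load_explicit(&lock->val, memory_order_relaxed);
	uint32_t new;

	/*
	 * Advance only the owner half (wrapping within 16 bits); the CAS
	 * loop tolerates new waiters bumping the ticket half concurrently.
	 */
	do {
		new = (old & 0xffff0000u) | (uint16_t)(old + 1);
	} while (!atomic_compare_exchange_weak_explicit(&lock->val, &old, new,
							memory_order_release,
							memory_order_relaxed));
}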