@@ -35,6 +35,7 @@ config ARM
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
@@ -18,6 +18,7 @@ generic-y += preempt.h
generic-y += seccomp.h
generic-y += serial.h
generic-y += trace_clock.h
+generic-y += qspinlock.h
generic-y += qrwlock.h
generated-y += mach-types.h
@@ -52,83 +52,10 @@ static inline void dsb_sev(void)
* release it, because V6 CPUs are assumed to have weakly ordered
* memory.
*/
+
#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
#define smp_mb__after_spinlock() smp_mb()
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
- u32 newval;
- arch_spinlock_t lockval;
-
- prefetchw(&lock->slock);
- __asm__ __volatile__(
-"1: ldrex %0, [%3]\n"
-" add %1, %0, %4\n"
-" strex %2, %1, [%3]\n"
-" teq %2, #0\n"
-" bne 1b"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
-
- while (lockval.tickets.next != lockval.tickets.owner) {
- wfe();
- lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
- }
-
- smp_mb();
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned long contended, res;
- u32 slock;
-
- prefetchw(&lock->slock);
- do {
- __asm__ __volatile__(
- " ldrex %0, [%3]\n"
- " mov %2, #0\n"
- " subs %1, %0, %0, ror #16\n"
- " addeq %0, %0, %4\n"
- " strexeq %2, %0, [%3]"
- : "=&r" (slock), "=&r" (contended), "=&r" (res)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
- } while (res);
-
- if (!contended) {
- smp_mb();
- return 1;
- } else {
- return 0;
- }
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
- lock->tickets.owner++;
- dsb_sev();
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.tickets.owner == lock.tickets.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- return !arch_spin_value_unlocked(READ_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- struct __raw_tickets tickets = READ_ONCE(lock->tickets);
- return (tickets.next - tickets.owner) > 1;
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
#endif /* __ASM_SPINLOCK_H */
@@ -6,29 +6,7 @@
# error "please don't include this file directly"
#endif
-#define TICKET_SHIFT 16
-
-typedef struct {
- union {
- u32 slock;
- struct __raw_tickets {
-#ifdef __ARMEB__
- u16 next;
- u16 owner;
-#else
- u16 owner;
- u16 next;
-#endif
- } tickets;
- };
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
-
-typedef struct {
- u32 lock;
-} arch_rwlock_t;
-
+#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif
Use the generic queued spinlock implementation for spinlocks. The WFE wait mechanism is retained: it is used as part of arch_mcs_spin_lock_contended(). Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc> --- arch/arm/Kconfig | 1 + arch/arm/include/asm/Kbuild | 1 + arch/arm/include/asm/spinlock.h | 77 +-------------------- arch/arm/include/asm/spinlock_types.h | 24 +-------- 4 files changed, 5 insertions(+), 98 deletions(-)