[4/6] ARM: Use qrwlock implementation

Message ID 20191013221310.30748-5-sebastian@breakpoint.cc (mailing list archive)
State New, archived
Series Queued spinlocks/RW-locks for ARM

Commit Message

Sebastian Andrzej Siewior Oct. 13, 2019, 10:13 p.m. UTC
Use the generic qrwlock implementation for rwlocks. The WFE mechanism remains
in use, since the qrwlock slow paths build on the arch spinlock implementation.

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
---
 arch/arm/Kconfig                      |   1 +
 arch/arm/include/asm/Kbuild           |   1 +
 arch/arm/include/asm/spinlock.h       | 143 +-------------------------
 arch/arm/include/asm/spinlock_types.h |   2 +-
 4 files changed, 5 insertions(+), 142 deletions(-)
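
For orientation, below is a minimal userspace model of the queued-rwlock idea.
It is illustrative only and not the kernel's code (that lives in
include/asm-generic/qrwlock.h and kernel/locking/qrwlock.c); all names in the
sketch are made up. Readers take a fast path on an atomic count, and writers
as well as slow-path readers serialize on a wait lock. In the kernel that wait
lock is the arch spinlock, which on ARM is where the WFE-based waiting comes
from.

/*
 * Illustrative model only -- the real code is in
 * include/asm-generic/qrwlock.h and kernel/locking/qrwlock.c.
 */
#include <pthread.h>
#include <stdatomic.h>

#define QW_LOCKED	1U	/* low bit: a writer holds the lock */
#define QR_BIAS		2U	/* each reader adds this to the count */

struct qrw_model {
	atomic_uint cnts;	/* QW_LOCKED bit plus reader count */
	pthread_mutex_t wait;	/* stands in for the arch spinlock */
};

#define QRW_MODEL_INIT	{ .cnts = 0, .wait = PTHREAD_MUTEX_INITIALIZER }

static void model_read_lock(struct qrw_model *l)
{
	/* Fast path: no writer, the bias we just added stands. */
	if (!(atomic_fetch_add_explicit(&l->cnts, QR_BIAS,
					memory_order_acquire) & QW_LOCKED))
		return;

	/* Slow path: drop the bias and queue on the wait lock. */
	atomic_fetch_sub_explicit(&l->cnts, QR_BIAS, memory_order_relaxed);
	pthread_mutex_lock(&l->wait);
	atomic_fetch_add_explicit(&l->cnts, QR_BIAS, memory_order_relaxed);
	while (atomic_load_explicit(&l->cnts, memory_order_acquire) & QW_LOCKED)
		;	/* the kernel waits with cpu_relax()/WFE here */
	pthread_mutex_unlock(&l->wait);
}

static void model_read_unlock(struct qrw_model *l)
{
	atomic_fetch_sub_explicit(&l->cnts, QR_BIAS, memory_order_release);
}

static void model_write_lock(struct qrw_model *l)
{
	unsigned int zero = 0;

	/* Writers serialize on the wait lock, then wait out the readers. */
	pthread_mutex_lock(&l->wait);
	while (!atomic_compare_exchange_weak_explicit(&l->cnts, &zero,
						      QW_LOCKED,
						      memory_order_acquire,
						      memory_order_relaxed))
		zero = 0;	/* readers still hold the lock, retry */
	pthread_mutex_unlock(&l->wait);
}

static void model_write_unlock(struct qrw_model *l)
{
	atomic_fetch_and_explicit(&l->cnts, ~QW_LOCKED, memory_order_release);
}

The real implementation additionally tracks a "writer waiting" state so that
new readers queue behind a waiting writer instead of streaming past it; that
detail is omitted above for brevity.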

Patch

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8a50efb559f35..6029d825671c6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -34,6 +34,7 @@  config ARM
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 68ca86f85eb73..5327be7572cd2 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -18,6 +18,7 @@  generic-y += preempt.h
 generic-y += seccomp.h
 generic-y += serial.h
 generic-y += trace_clock.h
+generic-y += qrwlock.h
 
 generated-y += mach-types.h
 generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 8f009e788ad40..f250a5022d4f6 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -52,6 +52,8 @@  static inline void dsb_sev(void)
  * release it, because V6 CPUs are assumed to have weakly ordered
  * memory.
  */
+#include <asm/qrwlock.h>
+#define smp_mb__after_spinlock()	smp_mb()
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
@@ -128,146 +130,5 @@  static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
-/*
- * RWLOCKS
- *
- *
- * Write locks are easy - we just set bit 31.  When unlocking, we can
- * just write zero since the lock is exclusively held.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	prefetchw(&rw->lock);
-	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
-
-	smp_mb();
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned long contended, res;
-
-	prefetchw(&rw->lock);
-	do {
-		__asm__ __volatile__(
-		"	ldrex	%0, [%2]\n"
-		"	mov	%1, #0\n"
-		"	teq	%0, #0\n"
-		"	strexeq	%1, %3, [%2]"
-		: "=&r" (contended), "=&r" (res)
-		: "r" (&rw->lock), "r" (0x80000000)
-		: "cc");
-	} while (res);
-
-	if (!contended) {
-		smp_mb();
-		return 1;
-	} else {
-		return 0;
-	}
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	smp_mb();
-
-	__asm__ __volatile__(
-	"str	%1, [%0]\n"
-	:
-	: "r" (&rw->lock), "r" (0)
-	: "cc");
-
-	dsb_sev();
-}
-
-/*
- * Read locks are a bit more hairy:
- *  - Exclusively load the lock value.
- *  - Increment it.
- *  - Store new lock value if positive, and we still own this location.
- *    If the value is negative, we've already failed.
- *  - If we failed to store the value, we want a negative result.
- *  - If we failed, try again.
- * Unlocking is similarly hairy.  We may have multiple read locks
- * currently active.  However, we know we won't have any write
- * locks.
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp, tmp2;
-
-	prefetchw(&rw->lock);
-	__asm__ __volatile__(
-"	.syntax unified\n"
-"1:	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	WFE("mi")
-"	rsbspl	%0, %1, #0\n"
-"	bmi	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
-
-	smp_mb();
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp, tmp2;
-
-	smp_mb();
-
-	prefetchw(&rw->lock);
-	__asm__ __volatile__(
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, #1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
-
-	if (tmp == 0)
-		dsb_sev();
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long contended, res;
-
-	prefetchw(&rw->lock);
-	do {
-		__asm__ __volatile__(
-		"	ldrex	%0, [%2]\n"
-		"	mov	%1, #0\n"
-		"	adds	%0, %0, #1\n"
-		"	strexpl	%1, %0, [%2]"
-		: "=&r" (contended), "=&r" (res)
-		: "r" (&rw->lock)
-		: "cc");
-	} while (res);
-
-	/* If the lock is negative, then it is already held for write. */
-	if (contended < 0x80000000) {
-		smp_mb();
-		return 1;
-	} else {
-		return 0;
-	}
-}
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 5976958647fe1..24a8a487e03b3 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -29,6 +29,6 @@  typedef struct {
 	u32 lock;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qrwlock_types.h>
 
 #endif
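
Generic users of the rwlock API need no changes from this switch; only the
arch_read_*/arch_write_* primitives behind read_lock()/write_lock() are
replaced. A hypothetical caller for illustration (the stats_* names are made
up):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);
static unsigned long stats_rx, stats_tx;

static unsigned long stats_total(void)
{
	unsigned long total;

	read_lock(&stats_lock);		/* ends up in the generic queued read lock */
	total = stats_rx + stats_tx;
	read_unlock(&stats_lock);

	return total;
}

static void stats_account(unsigned long rx, unsigned long tx)
{
	write_lock(&stats_lock);	/* ends up in the generic queued write lock */
	stats_rx += rx;
	stats_tx += tx;
	write_unlock(&stats_lock);
}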