diff mbox series

[3/6] ARM: Add xchg_{8|16}() on generic cmpxchg() on CPU_V6

Message ID 20191013221310.30748-4-sebastian@breakpoint.cc (mailing list archive)
State New, archived
Headers show
Series Queued spinlocks/RW-locks for ARM | expand

Commit Message

Sebastian Andrzej Siewior Oct. 13, 2019, 10:13 p.m. UTC
Use the generic xchg_u{8|16}() to implement the functions based on cmpxchg().
The generic header file expects __cmpxchg_u32() to perform a 32bit
cmpxchg operation.

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
---
 arch/arm/include/asm/cmpxchg.h | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index c61de6acf41ed..06e8b1c7a08fe 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -181,6 +181,16 @@  static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 #define swp_is_buggy
 #endif
 
+#ifdef CONFIG_CPU_V6
+static inline unsigned int __cmpxchg_u32(volatile void *ptr, unsigned int old,
+					 unsigned int new)
+{
+	return __cmpxchg(ptr, old, new, sizeof(unsigned int));
+}
+
+#include <asm-generic/cmpxchg-xchg.h>
+#endif
+
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
 	extern void __bad_xchg(volatile void *, int);
@@ -196,7 +206,15 @@  static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
-#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
+#ifdef CONFIG_CPU_V6
+	case 1:
+		ret = xchg_u8(ptr, x);
+		break;
+	case 2:
+		ret = xchg_u16(ptr, x);
+		break;
+
+#else /* MIN ARCH >= V6K */
 	case 1:
 		asm volatile("@	__xchg1\n"
 		"1:	ldrexb	%0, [%3]\n"