
[V9,08/15] riscv: cmpxchg: Forbid arch_cmpxchg64 for 32-bit

Message ID 20220808071318.3335746-9-guoren@kernel.org (mailing list archive)
State New, archived
Series arch: Add qspinlock support and atomic cleanup

Commit Message

Guo Ren Aug. 8, 2022, 7:13 a.m. UTC
From: Guo Ren <guoren@linux.alibaba.com>

RISC-V 32-bit does not provide the lr.d/sc.d instructions, so using
arch_cmpxchg64 there would emit invalid code. Forbid the 64-bit cases
at compile time to prevent that.
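
For reference, a minimal user-space sketch of the mechanism (the *_SKETCH
macros are illustrative stand-ins for the kernel's BUILD_BUG_ON() and
CONFIG_32BIT, not kernel code): a 64-bit cmpxchg helper guarded this way
compiles on 64-bit targets but fails the build on 32-bit ones.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-ins for the kernel's BUILD_BUG_ON()/IS_ENABLED(); the _SKETCH
 * names are illustrative only. The negative array size makes the build
 * fail whenever the condition is a non-zero constant expression.
 */
#define BUILD_BUG_ON_SKETCH(cond)  ((void)sizeof(char[1 - 2 * !!(cond)]))

#if UINTPTR_MAX == 0xffffffffu
#define CONFIG_32BIT_SKETCH 1	/* 32-bit target: no lr.d/sc.d available */
#else
#define CONFIG_32BIT_SKETCH 0
#endif

static inline uint64_t cmpxchg64_sketch(uint64_t *ptr, uint64_t old, uint64_t new)
{
	/*
	 * Mirrors the BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT)) added to the
	 * size-8 cases below: on a 32-bit build this line stops compilation.
	 */
	BUILD_BUG_ON_SKETCH(CONFIG_32BIT_SKETCH);

	/* Plain C stand-in for the lr.d/sc.d retry loop (not atomic). */
	uint64_t ret = *ptr;

	if (ret == old)
		*ptr = new;
	return ret;
}

int main(void)
{
	uint64_t v = 1;

	printf("returned %llu, value now %llu\n",
	       (unsigned long long)cmpxchg64_sketch(&v, 1, 2),
	       (unsigned long long)v);
	return 0;
}

With the #ifdef CONFIG_64BIT guard added at the end of the patch, 32-bit
configurations also lose the arch_cmpxchg64*() definitions entirely, so
misuse is caught when the caller is compiled rather than at run time.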

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
 arch/riscv/include/asm/cmpxchg.h | 7 +++++++
 1 file changed, 7 insertions(+)

Patch

diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 567ed2e274c4..14c9280c7f7f 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -25,6 +25,7 @@ 
 			: "memory");					\
 		break;							\
 	case 8:								\
+		BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT));			\
 		__asm__ __volatile__ (					\
 			"	amoswap.d %0, %2, %1\n"			\
 			: "=r" (__ret), "+A" (*__ptr)			\
@@ -58,6 +59,7 @@ 
 			: "memory");					\
 		break;							\
 	case 8:								\
+		BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT));			\
 		__asm__ __volatile__ (					\
 			"	amoswap.d.aqrl %0, %2, %1\n"		\
 			: "=r" (__ret), "+A" (*__ptr)			\
@@ -101,6 +103,7 @@ 
 			: "memory");					\
 		break;							\
 	case 8:								\
+		BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT));			\
 		__asm__ __volatile__ (					\
 			"0:	lr.d %0, %2\n"				\
 			"	bne %0, %z3, 1f\n"			\
@@ -146,6 +149,7 @@ 
 			: "memory");					\
 		break;							\
 	case 8:								\
+		BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT));			\
 		__asm__ __volatile__ (					\
 			"0:	lr.d %0, %2\n"				\
 			"	bne %0, %z3, 1f\n"			\
@@ -192,6 +196,7 @@ 
 			: "memory");					\
 		break;							\
 	case 8:								\
+		BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT));			\
 		__asm__ __volatile__ (					\
 			"0:	lr.d %0, %2\n"				\
 			"	bne %0, %z3, 1f\n"			\
@@ -220,6 +225,7 @@ 
 #define arch_cmpxchg_local(ptr, o, n)					\
 	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 
+#ifdef CONFIG_64BIT
 #define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
@@ -231,5 +237,6 @@ 
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	arch_cmpxchg_relaxed((ptr), (o), (n));				\
 })
+#endif /* CONFIG_64BIT */
 
 #endif /* _ASM_RISCV_CMPXCHG_H */