[v2,4/4] locking/atomic: hexagon: arch_cmpxchg[64]_local undefined

Message ID 20231104091615.4884-5-wuqiang.matt@bytedance.com (mailing list archive)
State Handled Elsewhere
Series locking/atomic: arch_cmpxchg[64]_local undefined

Commit Message

wuqiang.matt Nov. 4, 2023, 9:16 a.m. UTC
For architectures that support native cmpxchg, we'd like to
implement arch_cmpxchg[64]_local using the native variants for
the supported data sizes. Otherwise, generic_cmpxchg[64]_local
will be used.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-lkp@intel.com/

Signed-off-by: wuqiang.matt <wuqiang.matt@bytedance.com>
---
 arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)
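
For illustration, a minimal sketch of how a caller might use the
resulting API; try_claim_slot() is a hypothetical helper, not part of
this patch:

  /*
   * Hypothetical caller, for illustration only: claim a CPU-local
   * slot with a local (non-SMP-ordered) compare-and-exchange.
   * Returns true if the slot was free and has now been claimed.
   */
  static bool try_claim_slot(int *slot)
  {
          /* sizeof(int) == 4, so this picks __cmpxchg_32() on hexagon */
          return arch_cmpxchg_local(slot, 0, 1) == 0;
  }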

Comments

Masami Hiramatsu (Google) Nov. 8, 2023, 1:04 a.m. UTC | #1
On Sat,  4 Nov 2023 17:16:15 +0800
"wuqiang.matt" <wuqiang.matt@bytedance.com> wrote:

> For architectures that support native cmpxchg, we'd like to
> implement arch_cmpxchg[64]_local using the native variants for
> the supported data sizes. Otherwise, generic_cmpxchg[64]_local
> will be used.
> 
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-lkp@intel.com/
> 

Looks good to me.

Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

But I need the hexagon maintainer's comment too.

Thank you,

> Signed-off-by: wuqiang.matt <wuqiang.matt@bytedance.com>
> ---
>  arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
>  1 file changed, 50 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
> index bf6cf5579cf4..2b5e5bbaf807 100644
> --- a/arch/hexagon/include/asm/cmpxchg.h
> +++ b/arch/hexagon/include/asm/cmpxchg.h
> @@ -8,6 +8,8 @@
>  #ifndef _ASM_CMPXCHG_H
>  #define _ASM_CMPXCHG_H
>  
> +#include <linux/build_bug.h>
> +
>  /*
>   * __arch_xchg - atomically exchange a register and a memory location
>   * @x: value to swap
> @@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
>   *  variable casting.
>   */
>  
> -#define arch_cmpxchg(ptr, old, new)				\
> +#define __cmpxchg_32(ptr, old, new)				\
>  ({								\
>  	__typeof__(ptr) __ptr = (ptr);				\
>  	__typeof__(*(ptr)) __old = (old);			\
>  	__typeof__(*(ptr)) __new = (new);			\
>  	__typeof__(*(ptr)) __oldval = 0;			\
>  								\
> +	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
> +								\
>  	asm volatile(						\
>  		"1:	%0 = memw_locked(%1);\n"		\
>  		"	{ P0 = cmp.eq(%0,%2);\n"		\
> @@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
>  	__oldval;						\
>  })
>  
> +#define __cmpxchg(ptr, old, val, size)				\
> +({								\
> +	__typeof__(*(ptr)) oldval;				\
> +								\
> +	switch (size) {						\
> +	case 4:							\
> +		oldval = __cmpxchg_32(ptr, old, val);		\
> +		break;						\
> +	default:						\
> +		BUILD_BUG();					\
> +		oldval = val;					\
> +		break;						\
> +	}							\
> +								\
> +	oldval;							\
> +})
> +
> +#define arch_cmpxchg(ptr, o, n)	__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
> +
> +/*
> + * Always make arch_cmpxchg[64]_local available: use native cmpxchg
> + * where supported, and fall back to generic_cmpxchg[64]_local.
> + */
> +#include <asm-generic/cmpxchg-local.h>
> +
> +#define arch_cmpxchg_local(ptr, old, val)			\
> +({								\
> +	__typeof__(*(ptr)) retval;				\
> +	int size = sizeof(*(ptr));				\
> +								\
> +	switch (size) {						\
> +	case 4:							\
> +		retval = __cmpxchg_32(ptr, old, val);		\
> +		break;						\
> +	default:						\
> +		retval = __generic_cmpxchg_local(ptr, old,	\
> +						 val, size);	\
> +		break;						\
> +	}							\
> +								\
> +	retval;							\
> +})
> +
> +#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
> +
>  #endif /* _ASM_CMPXCHG_H */
> -- 
> 2.40.1
>
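
A note on the size guard in the hunk above: the native path only
handles 4-byte objects (memw_locked), so arch_cmpxchg() rejects any
other width at compile time via BUILD_BUG(), while the _local variants
route other sizes to the generic interrupt-disabling helpers.
Hypothetical fragments, for illustration only (big is a made-up
variable):

  u64 big = 0;

  /* Compile-time error: sizeof(big) == 8 hits the default case
   * in __cmpxchg(), so BUILD_BUG() fails the build.
   */
  arch_cmpxchg(&big, 0ULL, 1ULL);

  /* Fine: the 64-bit local variant maps to the generic
   * __generic_cmpxchg64_local() helper.
   */
  arch_cmpxchg64_local(&big, 0ULL, 1ULL);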

Patch

diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index bf6cf5579cf4..2b5e5bbaf807 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -8,6 +8,8 @@
 #ifndef _ASM_CMPXCHG_H
 #define _ASM_CMPXCHG_H
 
+#include <linux/build_bug.h>
+
 /*
  * __arch_xchg - atomically exchange a register and a memory location
  * @x: value to swap
@@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
  *  variable casting.
  */
 
-#define arch_cmpxchg(ptr, old, new)				\
+#define __cmpxchg_32(ptr, old, new)				\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
 	__typeof__(*(ptr)) __old = (old);			\
 	__typeof__(*(ptr)) __new = (new);			\
 	__typeof__(*(ptr)) __oldval = 0;			\
 								\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
+								\
 	asm volatile(						\
 		"1:	%0 = memw_locked(%1);\n"		\
 		"	{ P0 = cmp.eq(%0,%2);\n"		\
@@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
 	__oldval;						\
 })
 
+#define __cmpxchg(ptr, old, val, size)				\
+({								\
+	__typeof__(*(ptr)) oldval;				\
+								\
+	switch (size) {						\
+	case 4:							\
+		oldval = __cmpxchg_32(ptr, old, val);		\
+		break;						\
+	default:						\
+		BUILD_BUG();					\
+		oldval = val;					\
+		break;						\
+	}							\
+								\
+	oldval;							\
+})
+
+#define arch_cmpxchg(ptr, o, n)	__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * Always make arch_cmpxchg[64]_local available: use native cmpxchg
+ * where supported, and fall back to generic_cmpxchg[64]_local.
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#define arch_cmpxchg_local(ptr, old, val)			\
+({								\
+	__typeof__(*(ptr)) retval;				\
+	int size = sizeof(*(ptr));				\
+								\
+	switch (size) {						\
+	case 4:							\
+		retval = __cmpxchg_32(ptr, old, val);		\
+		break;						\
+	default:						\
+		retval = __generic_cmpxchg_local(ptr, old,	\
+						 val, size);	\
+		break;						\
+	}							\
+								\
+	retval;							\
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
 #endif /* _ASM_CMPXCHG_H */
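
For context, the generic fallback pulled in via
asm-generic/cmpxchg-local.h implements the local variants by disabling
interrupts around a plain read/compare/write, which is sufficient for
data only accessed by the current CPU. A simplified sketch of that
idea (cmpxchg_local_sketch is a made-up name; the real helpers also
dispatch on object size):

  static inline u32 cmpxchg_local_sketch(u32 *ptr, u32 old, u32 new)
  {
          unsigned long flags;
          u32 prev;

          local_irq_save(flags);          /* exclude interrupts on this CPU */
          prev = *ptr;
          if (prev == old)
                  *ptr = new;
          local_irq_restore(flags);

          return prev;                    /* caller checks prev == old */
  }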