
[RFC,07/12] percpu: Wire up cmpxchg128

Message ID 20221219154119.286760562@infradead.org (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series Introduce cmpxchg128() -- aka. the demise of cmpxchg_double()

Commit Message

Peter Zijlstra Dec. 19, 2022, 3:35 p.m. UTC
In order to replace cmpxchg_double() with the newly minted
cmpxchg128() family of functions, wire it up in this_cpu_cmpxchg().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/arm64/include/asm/percpu.h |   24 ++++++++++++++++++
 arch/s390/include/asm/percpu.h  |   20 +++++++++++++++
 arch/x86/include/asm/percpu.h   |   52 ++++++++++++++++++++++++++++++++++++++++
 include/asm-generic/percpu.h    |    8 ++++++
 include/linux/percpu-defs.h     |   20 +++++++++++++--
 5 files changed, 122 insertions(+), 2 deletions(-)
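
For illustration, a 16-byte per-cpu update through the new wiring would
look something like this (a sketch only; the variable and helper below
are made up for the example and are not part of the patch):

#include <linux/percpu.h>
#include <linux/types.h>

/* Hypothetical 16-byte per-cpu slot; u128 comes from earlier in the series. */
static DEFINE_PER_CPU(u128, demo_slot);

static bool demo_try_update(u128 old, u128 new)
{
	/*
	 * sizeof(u128) == 16, so this dispatches through the new
	 * __pcpu_size16_call_return2() case to this_cpu_cmpxchg_16().
	 */
	return this_cpu_cmpxchg(demo_slot, old, new) == old;
}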

Comments

Arnd Bergmann Dec. 29, 2022, 1:36 p.m. UTC | #1
On Mon, Dec 19, 2022, at 16:35, Peter Zijlstra wrote:
> In order to replace cmpxchg_double() with the newly minted
> cmpxchg128() family of functions, wire it up in this_cpu_cmpxchg().
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Does this work on x86 chips without X86_FEATURE_CX16?

As far as I can tell, the new percpu_cmpxchg128_op uses
the cmpxchg16b instruction unconditionally without checking
for the feature bit first, and is now used by this_cpu_cmpxchg()
unconditionally as well.

This is fine for the moment if the only user is mm/slub.c
and it retains the system_has_cmpxchg128() runtime check,
but I think a better interface would be to guarantee that
this_cpu_cmpxchg() always ends up in either working inline
asm or the generic fallback, never in an invalid opcode.
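
A minimal sketch of the caller-side guard described above
(system_has_cmpxchg128() is the helper from earlier in the series;
the per-cpu slot and the locked fallback are purely illustrative):

/* Hypothetical 16-byte per-cpu slot and its locked slow path. */
static DEFINE_PER_CPU(u128, pcp_slot);

static u128 pcp_slot_cmpxchg(u128 old, u128 new)
{
	if (system_has_cmpxchg128())
		/* lockless path: ends up in the new cmpxchg16b inline asm */
		return this_cpu_cmpxchg(pcp_slot, old, new);

	/* hypothetical slow path that never executes cmpxchg16b */
	return pcp_slot_cmpxchg_locked(old, new);
}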

For consistency, I would also suggest that this_cpu_cmpxchg()
take the same argument types as cmpxchg(): at most 'unsigned
long' sized, with additional this_cpu_cmpxchg64() and
this_cpu_cmpxchg128() macros that take fixed-size arguments.
I have an older patch set that additionally converts all
8-bit and 16-bit cmpxchg()/xchg() calls to cmpxchg_8()/
xchg_8()/cmpxchg_16()/xchg_16() and leaves only the
fixed 32-bit and variably typed 'unsigned long' sized
callers for the weakly typed variant.
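
In other words, something along these lines (a sketch of the proposed
calling convention only, not code from this series; all variables are
illustrative):

	/* weakly typed: capped at sizeof(unsigned long) */
	oldl   = this_cpu_cmpxchg(pcp_word, ol, nl);
	/* explicit fixed-size variants for the wide cases */
	old64  = this_cpu_cmpxchg64(pcp_u64, o64, n64);
	old128 = this_cpu_cmpxchg128(pcp_u128, o128, n128);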

       Arnd
Heiko Carstens Jan. 4, 2023, 12:09 p.m. UTC | #2
On Mon, Dec 19, 2022 at 04:35:32PM +0100, Peter Zijlstra wrote:
> In order to replace cmpxchg_double() with the newly minted
> cmpxchg128() family of functions, wire it up in this_cpu_cmpxchg().
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
...
> --- a/arch/s390/include/asm/percpu.h
> +++ b/arch/s390/include/asm/percpu.h
> +#define this_cpu_cmpxchg_16(pcp, oval, nval)				\
> +({									\
> +	u128 old__ = __pcpu_cast_128((nval), (nval));			\
> +	u128 new__ = __pcpu_cast_128((oval), (oval));			\

spot the bug... please merge the below into this patch.

diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 24a4d9d644c0..d1997d01892a 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -156,8 +156,8 @@
 
 #define this_cpu_cmpxchg_16(pcp, oval, nval)				\
 ({									\
-	u128 old__ = __pcpu_cast_128((nval), (nval));			\
-	u128 new__ = __pcpu_cast_128((oval), (oval));			\
+	u128 old__ = __pcpu_cast_128((oval), (oval));			\
+	u128 new__ = __pcpu_cast_128((nval), (nval));			\
 	typedef typeof(pcp) pcp_op_T__;					\
 	pcp_op_T__ *ptr__;						\
 	u128 ret__;							\
Peter Zijlstra Jan. 9, 2023, 4:29 p.m. UTC | #3
On Wed, Jan 04, 2023 at 01:09:16PM +0100, Heiko Carstens wrote:
> On Mon, Dec 19, 2022 at 04:35:32PM +0100, Peter Zijlstra wrote:
> > In order to replace cmpxchg_double() with the newly minted
> > cmpxchg128() family of functions, wire it up in this_cpu_cmpxchg().
> > 
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ...
> > --- a/arch/s390/include/asm/percpu.h
> > +++ b/arch/s390/include/asm/percpu.h
> > +#define this_cpu_cmpxchg_16(pcp, oval, nval)				\
> > +({									\
> > +	u128 old__ = __pcpu_cast_128((nval), (nval));			\
> > +	u128 new__ = __pcpu_cast_128((oval), (oval));			\
> 
> spot the bug... please merge the below into this patch.

D'oh, luckily I got a fresh pile of brown paper bags for xmas.

Patch

--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -140,6 +140,10 @@  PERCPU_RET_OP(add, add, ldadd)
  * re-enabling preemption for preemptible kernels, but doing that in a way
  * which builds inside a module would mean messing directly with the preempt
  * count. If you do this, peterz and tglx will hunt you down.
+ *
+ * Not to mention it'll break the actual preemption model for missing a
+ * preemption point when TIF_NEED_RESCHED gets set while preemption is
+ * disabled.
  */
 #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
 ({									\
@@ -240,6 +244,26 @@  PERCPU_RET_OP(add, add, ldadd)
 #define this_cpu_cmpxchg_8(pcp, o, n)	\
 	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
 
+#define __pcpu_cast_128(_exp, _val)					\
+	_Generic((_exp),						\
+		 u128: (_val),						\
+		 s128: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define this_cpu_cmpxchg_16(pcp, o, n)					\
+({									\
+	u128 old__ = __pcpu_cast_128((o), (o));				\
+	u128 new__ = __pcpu_cast_128((n), (n));				\
+	typedef typeof(pcp) pcp_op_T__;					\
+	pcp_op_T__ *ptr__;						\
+	u128 ret__;							\
+	preempt_disable_notrace();					\
+	ptr__ = raw_cpu_ptr(&(pcp));					\
+	ret__ = cmpxchg128_local((void *)ptr__, old__, new__);		\
+	preempt_enable_notrace();					\
+	(typeof(pcp))__pcpu_cast_128(*ptr__, ret__);			\
+})
+
 #ifdef __KVM_NVHE_HYPERVISOR__
 extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
 #define __per_cpu_offset
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -148,6 +148,26 @@ 
 #define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 
+#define __pcpu_cast_128(_exp, _val)					\
+	_Generic((_exp),						\
+		 u128: (_val),						\
+		 s128: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define this_cpu_cmpxchg_16(pcp, oval, nval)				\
+({									\
+	u128 old__ = __pcpu_cast_128((nval), (nval));			\
+	u128 new__ = __pcpu_cast_128((oval), (oval));			\
+	typedef typeof(pcp) pcp_op_T__;					\
+	pcp_op_T__ *ptr__;						\
+	u128 ret__;							\
+	preempt_disable_notrace();					\
+	ptr__ = raw_cpu_ptr(&(pcp));					\
+	ret__ = cmpxchg128((void *)ptr__, old__, new__);		\
+	preempt_enable_notrace();					\
+	(typeof(pcp))__pcpu_cast_128(*ptr__, ret__);			\
+})
+
 #define arch_this_cpu_xchg(pcp, nval)					\
 ({									\
 	typeof(pcp) *ptr__;						\
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -210,6 +210,58 @@  do {									\
 	(typeof(_var))(unsigned long) pco_old__;			\
 })
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_CMPXCHG64)
+#define __pcpu_cast_64(_exp, _val)					\
+	_Generic((_exp),						\
+		 u64: (_val),						\
+		 s64: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
+({									\
+	__pcpu_type_##size pco_old__ = __pcpu_cast_64((_oval), (_oval));\
+	__pcpu_type_##size pco_new__ = __pcpu_cast_64((_nval), (_nval));\
+	asm qual ("cmpxchg8b " __percpu_arg([var])			\
+		  : [var] "+m" (_var),					\
+		    "+A" (pco_old__)					\
+		  : "b" ((u32)pco_new__), "c" ((u32)(pco_new__ >> 32))	\
+		  : "memory");						\
+	(typeof(_var))__pcpu_cast_64(_var, pco_old__);			\
+})
+
+#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg64_op(8,         , pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)
+#endif
+
+#ifdef CONFIG_X86_64
+#define __pcpu_cast_128(_exp, _val)					\
+	_Generic((_exp),						\
+		 u128: (_val),						\
+		 s128: (_val),						\
+		 default: (unsigned long)(_val))
+
+#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
+({									\
+	union __u128_halves pco_old__ = {				\
+		.full = __pcpu_cast_128((_oval), (_oval))		\
+	};								\
+	union __u128_halves pco_new__ = {				\
+		.full = __pcpu_cast_128((_nval), (_nval))		\
+	};								\
+	asm qual ("cmpxchg16b " __percpu_arg([var])			\
+		  : [var] "+m" (_var),					\
+		    "+a" (pco_old__.low),				\
+		    "+d" (pco_old__.high)				\
+		  : "b" (pco_new__.low),				\
+		    "c" (pco_new__.high)				\
+		  : "memory");						\
+	(typeof(_var))__pcpu_cast_128(_var, pco_old__.full);		\
+})
+
+#define raw_cpu_cmpxchg_16(pcp, oval, nval)	percpu_cmpxchg128_op(16,         , pcp, oval, nval)
+#define this_cpu_cmpxchg_16(pcp, oval, nval)	percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)
+#endif
+
 /*
  * this_cpu_read() makes gcc load the percpu variable every time it is
  * accessed while this_cpu_read_stable() allows the value to be cached.
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -298,6 +298,10 @@  do {									\
 #define raw_cpu_cmpxchg_8(pcp, oval, nval) \
 	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
+#ifndef raw_cpu_cmpxchg_16
+#define raw_cpu_cmpxchg_16(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
 
 #ifndef raw_cpu_cmpxchg_double_1
 #define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
@@ -423,6 +427,10 @@  do {									\
 #define this_cpu_cmpxchg_8(pcp, oval, nval) \
 	this_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
+#ifndef this_cpu_cmpxchg_16
+#define this_cpu_cmpxchg_16(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
 
 #ifndef this_cpu_cmpxchg_double_1
 #define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -343,6 +343,22 @@  static inline void __this_cpu_preempt_ch
 	pscr2_ret__;							\
 })
 
+#define __pcpu_size16_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	case 16: pscr2_ret__ = stem##16(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
 /*
  * Special handling for cmpxchg_double.  cmpxchg_double is passed two
  * percpu variables.  The first has to be aligned to a double word
@@ -425,7 +441,7 @@  do {									\
 #define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
 #define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
 #define raw_cpu_cmpxchg(pcp, oval, nval) \
-	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+	__pcpu_size16_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
 #define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
 
@@ -512,7 +528,7 @@  do {									\
 #define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
 #define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
 #define this_cpu_cmpxchg(pcp, oval, nval) \
-	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+	__pcpu_size16_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
 #define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)