Message ID | 20230202152655.433050731@infradead.org (mailing list archive)
---|---
State | Not Applicable
Delegated to: | Herbert Xu
Series | Introduce cmpxchg128() -- aka. the demise of cmpxchg_double()
On Thu, Feb 02, 2023 at 03:50:34PM +0100, Peter Zijlstra wrote:
> Wire up the cmpxchg128 familty in the atomic wrappery scripts.

s/familty/family/ (and s/wrappery/wrapper/ ?)

> 
> These provide the generic cmpxchg128 family of functions from the
> arch_ prefixed version, adding explicit instrumentation where needed.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> ---
>  include/linux/atomic/atomic-arch-fallback.h |   95 +++++++++++++++++++++++++++-
>  include/linux/atomic/atomic-instrumented.h  |   77 ++++++++++++++++++++++
>  scripts/atomic/gen-atomic-fallback.sh       |    4 -
>  scripts/atomic/gen-atomic-instrumented.sh   |    4 -
>  4 files changed, 174 insertions(+), 6 deletions(-)
> 
> --- a/include/linux/atomic/atomic-arch-fallback.h
> +++ b/include/linux/atomic/atomic-arch-fallback.h
> @@ -77,6 +77,29 @@
>  
>  #endif /* arch_cmpxchg64_relaxed */
>  
> +#ifndef arch_cmpxchg128_relaxed
> +#define arch_cmpxchg128_acquire arch_cmpxchg128
> +#define arch_cmpxchg128_release arch_cmpxchg128
> +#define arch_cmpxchg128_relaxed arch_cmpxchg128
> +#else /* arch_cmpxchg128_relaxed */
> +
> +#ifndef arch_cmpxchg128_acquire
> +#define arch_cmpxchg128_acquire(...) \
> +	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
> +#endif
> +
> +#ifndef arch_cmpxchg128_release
> +#define arch_cmpxchg128_release(...) \
> +	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
> +#endif
> +
> +#ifndef arch_cmpxchg128
> +#define arch_cmpxchg128(...) \
> +	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
> +#endif
> +
> +#endif /* arch_cmpxchg128_relaxed */
> +
>  #ifndef arch_try_cmpxchg_relaxed
>  #ifdef arch_try_cmpxchg
>  #define arch_try_cmpxchg_acquire arch_try_cmpxchg
> @@ -217,6 +240,76 @@
>  
>  #endif /* arch_try_cmpxchg64_relaxed */
>  
> +#ifndef arch_try_cmpxchg128_relaxed
> +#ifdef arch_try_cmpxchg128
> +#define arch_try_cmpxchg128_acquire arch_try_cmpxchg128
> +#define arch_try_cmpxchg128_release arch_try_cmpxchg128
> +#define arch_try_cmpxchg128_relaxed arch_try_cmpxchg128
> +#endif /* arch_try_cmpxchg128 */
> +
> +#ifndef arch_try_cmpxchg128
> +#define arch_try_cmpxchg128(_ptr, _oldp, _new) \
> +({ \
> +	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
> +	___r = arch_cmpxchg128((_ptr), ___o, (_new)); \
> +	if (unlikely(___r != ___o)) \
> +		*___op = ___r; \
> +	likely(___r == ___o); \
> +})
> +#endif /* arch_try_cmpxchg128 */
> +
> +#ifndef arch_try_cmpxchg128_acquire
> +#define arch_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
> +({ \
> +	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
> +	___r = arch_cmpxchg128_acquire((_ptr), ___o, (_new)); \
> +	if (unlikely(___r != ___o)) \
> +		*___op = ___r; \
> +	likely(___r == ___o); \
> +})
> +#endif /* arch_try_cmpxchg128_acquire */
> +
> +#ifndef arch_try_cmpxchg128_release
> +#define arch_try_cmpxchg128_release(_ptr, _oldp, _new) \
> +({ \
> +	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
> +	___r = arch_cmpxchg128_release((_ptr), ___o, (_new)); \
> +	if (unlikely(___r != ___o)) \
> +		*___op = ___r; \
> +	likely(___r == ___o); \
> +})
> +#endif /* arch_try_cmpxchg128_release */
> +
> +#ifndef arch_try_cmpxchg128_relaxed
> +#define arch_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
> +({ \
> +	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
> +	___r = arch_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
> +	if (unlikely(___r != ___o)) \
> +		*___op = ___r; \
> +	likely(___r == ___o); \
> +})
> +#endif /* arch_try_cmpxchg128_relaxed */
> +
> +#else /* arch_try_cmpxchg128_relaxed */
> +
> +#ifndef arch_try_cmpxchg128_acquire
> +#define arch_try_cmpxchg128_acquire(...) \
> +	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
> +#endif
> +
> +#ifndef arch_try_cmpxchg128_release
> +#define arch_try_cmpxchg128_release(...) \
> +	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
> +#endif
> +
> +#ifndef arch_try_cmpxchg128
> +#define arch_try_cmpxchg128(...) \
> +	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
> +#endif
> +
> +#endif /* arch_try_cmpxchg128_relaxed */
> +
>  #ifndef arch_atomic_read_acquire
>  static __always_inline int
>  arch_atomic_read_acquire(const atomic_t *v)
> @@ -2456,4 +2549,4 @@ arch_atomic64_dec_if_positive(atomic64_t
>  #endif
>  
>  #endif /* _LINUX_ATOMIC_FALLBACK_H */
> -// b5e87bdd5ede61470c29f7a7e4de781af3770f09
> +// 46357a526de89c762d30fb238f35a7d5950a670b
> --- a/include/linux/atomic/atomic-instrumented.h
> +++ b/include/linux/atomic/atomic-instrumented.h
> @@ -1968,6 +1968,36 @@ atomic_long_dec_if_positive(atomic_long_
>  	arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
>  })
>  
> +#define cmpxchg128(ptr, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	kcsan_mb(); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	arch_cmpxchg128(__ai_ptr, __VA_ARGS__); \
> +})
> +
> +#define cmpxchg128_acquire(ptr, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	arch_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \
> +})
> +
> +#define cmpxchg128_release(ptr, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	kcsan_release(); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	arch_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \
> +})
> +
> +#define cmpxchg128_relaxed(ptr, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	arch_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \
> +})
> +
>  #define try_cmpxchg(ptr, oldp, ...) \
>  ({ \
>  	typeof(ptr) __ai_ptr = (ptr); \
> @@ -2044,6 +2074,44 @@ atomic_long_dec_if_positive(atomic_long_
>  	arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
>  })
>  
> +#define try_cmpxchg128(ptr, oldp, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	typeof(oldp) __ai_oldp = (oldp); \
> +	kcsan_mb(); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
> +	arch_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \
> +})
> +
> +#define try_cmpxchg128_acquire(ptr, oldp, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	typeof(oldp) __ai_oldp = (oldp); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
> +	arch_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
> +})
> +
> +#define try_cmpxchg128_release(ptr, oldp, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	typeof(oldp) __ai_oldp = (oldp); \
> +	kcsan_release(); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
> +	arch_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
> +})
> +
> +#define try_cmpxchg128_relaxed(ptr, oldp, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	typeof(oldp) __ai_oldp = (oldp); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
> +	arch_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
> +})
> +
>  #define cmpxchg_local(ptr, ...) \
>  ({ \
>  	typeof(ptr) __ai_ptr = (ptr); \
> @@ -2058,6 +2126,13 @@ atomic_long_dec_if_positive(atomic_long_
>  	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
>  })
>  
> +#define cmpxchg128_local(ptr, ...) \
> +({ \
> +	typeof(ptr) __ai_ptr = (ptr); \
> +	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
> +	arch_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \
> +})
> +
>  #define sync_cmpxchg(ptr, ...) \
>  ({ \
>  	typeof(ptr) __ai_ptr = (ptr); \
> @@ -2083,4 +2158,4 @@ atomic_long_dec_if_positive(atomic_long_
>  })
>  
>  #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
> -// 764f741eb77a7ad565dc8d99ce2837d5542e8aee
> +// 27320c1ec2bf2878ecb9df3ea4816a7bc0c57a52
> --- a/scripts/atomic/gen-atomic-fallback.sh
> +++ b/scripts/atomic/gen-atomic-fallback.sh
> @@ -217,11 +217,11 @@ cat << EOF
>  
>  EOF
>  
> -for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
> +for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64" "arch_cmpxchg128"; do
>  	gen_xchg_fallbacks "${xchg}"
>  done
>  
> -for cmpxchg in "cmpxchg" "cmpxchg64"; do
> +for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do
>  	gen_try_cmpxchg_fallbacks "${cmpxchg}"
>  done
>  
> --- a/scripts/atomic/gen-atomic-instrumented.sh
> +++ b/scripts/atomic/gen-atomic-instrumented.sh
> @@ -166,14 +166,14 @@ grep '^[a-z]' "$1" | while read name met
>  done
>  
>  
> -for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg" "try_cmpxchg64"; do
> +for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
>  	for order in "" "_acquire" "_release" "_relaxed"; do
>  		gen_xchg "${xchg}" "${order}" ""
>  		printf "\n"
>  	done
>  done
>  
> -for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
> +for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg"; do
>  	gen_xchg "${xchg}" "" ""
>  	printf "\n"
> done
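
For context, the try_cmpxchg*() pattern that this patch extends to 128 bits
returns a boolean and, on failure, writes the value it actually found back
through the old-value pointer. A minimal usage sketch, illustrative only and
not part of the patch; it assumes an architecture that provides
arch_cmpxchg128() and the u128 type introduced earlier in this series, and
the hypothetical helper name u128_add() is mine:

	/*
	 * Illustrative sketch (not from the patch): add to a 16-byte
	 * counter using the new generic try_cmpxchg128().
	 */
	static inline void u128_add(u128 *ptr, u128 delta)
	{
		u128 old = *ptr, new;	/* plain read; fixed up on CAS failure */

		do {
			new = old + delta;
			/*
			 * On failure, try_cmpxchg128() stores the current
			 * value of *ptr into 'old', so the loop retries
			 * with fresh data and needs no explicit re-read.
			 */
		} while (!try_cmpxchg128(ptr, &old, new));
	}

That failure-path refresh is exactly what the generated fallback above
implements: it compares the arch_cmpxchg128() return value against *_oldp
and writes it back when the two differ.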