[v5,09/10] arm64: atomics: Undefine internal macros after use

Message ID 20190829154834.26547-10-will@kernel.org
State New, archived
Series arm64: avoid out-of-line ll/sc atomics

Commit Message

Will Deacon Aug. 29, 2019, 3:48 p.m. UTC
We use a bunch of internal macros when constructing our atomic and
cmpxchg routines in order to save on boilerplate. Avoid exposing these
directly to users of the header files.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/atomic.h  | 7 +++++++
 arch/arm64/include/asm/cmpxchg.h | 4 ++++
 2 files changed, 11 insertions(+)
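
The change follows a standard C idiom: generate a family of functions with
a helper macro, then #undef the helper immediately so the name cannot leak
to code that includes the header. A minimal, self-contained sketch of the
idiom (GEN_OP and the demo_* functions are invented for illustration; they
are not the kernel's macros):

    #include <stdio.h>

    /* GEN_OP() stamps out one trivial wrapper per operation. */
    #define GEN_OP(name, expr)                          \
    static inline int demo_##name(int a, int b)         \
    {                                                   \
            return (expr);                              \
    }

    GEN_OP(add, a + b)
    GEN_OP(sub, a - b)
    GEN_OP(and, a & b)

    /*
     * Undefine after use: includers can call demo_add() and friends,
     * but the GEN_OP helper itself is no longer visible.
     */
    #undef GEN_OP

    int main(void)
    {
            printf("%d %d %d\n", demo_add(2, 3), demo_sub(2, 3),
                   demo_and(2, 3));
            return 0;
    }

Compiled and run, this prints "5 -1 2"; the patch below applies the same
#undef-after-use discipline to the kernel's atomic and cmpxchg generators.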

Comments

Andrew Murray Aug. 29, 2019, 11:44 p.m. UTC | #1
On Thu, Aug 29, 2019 at 04:48:33PM +0100, Will Deacon wrote:
> We use a bunch of internal macros when constructing our atomic and
> cmpxchg routines in order to save on boilerplate. Avoid exposing these
> directly to users of the header files.
> 
> Signed-off-by: Will Deacon <will@kernel.org>

Reviewed-by: Andrew Murray <andrew.murray@arm.com>

Patch

diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7c334337674d..916e5a6d5454 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -32,6 +32,7 @@ ATOMIC_OP(atomic_add)
 ATOMIC_OP(atomic_and)
 ATOMIC_OP(atomic_sub)
 
+#undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)					\
 static inline int arch_##op##name(int i, atomic_t *v)			\
@@ -54,6 +55,8 @@ ATOMIC_FETCH_OPS(atomic_fetch_sub)
 ATOMIC_FETCH_OPS(atomic_add_return)
 ATOMIC_FETCH_OPS(atomic_sub_return)
 
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)							\
 static inline void arch_##op(long i, atomic64_t *v)			\
@@ -68,6 +71,7 @@ ATOMIC64_OP(atomic64_add)
 ATOMIC64_OP(atomic64_and)
 ATOMIC64_OP(atomic64_sub)
 
+#undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)					\
 static inline long arch_##op##name(long i, atomic64_t *v)		\
@@ -90,6 +94,9 @@ ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
 ATOMIC64_FETCH_OPS(atomic64_add_return)
 ATOMIC64_FETCH_OPS(atomic64_sub_return)
 
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
 static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	return __lse_ll_sc_body(atomic64_dec_if_positive, v);
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index afaba73e0b2c..a1398f2f9994 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -129,6 +129,8 @@ __CMPXCHG_CASE(mb_, 16)
 __CMPXCHG_CASE(mb_, 32)
 __CMPXCHG_CASE(mb_, 64)
 
+#undef __CMPXCHG_CASE
+
 #define __CMPXCHG_DBL(name)						\
 static inline long __cmpxchg_double##name(unsigned long old1,		\
 					 unsigned long old2,		\
@@ -143,6 +145,8 @@ static inline long __cmpxchg_double##name(unsigned long old1,		\
 __CMPXCHG_DBL(   )
 __CMPXCHG_DBL(_mb)
 
+#undef __CMPXCHG_DBL
+
 #define __CMPXCHG_GEN(sfx)						\
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
 					   unsigned long old,		\
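
Without these #undefs, the internal helper names stayed defined in every
translation unit that pulled in the headers. A hypothetical illustration of
the collision class this avoids (the driver file, its local macro, and
do_local_thing() are all invented for the example):

    /* hypothetical-driver.c */
    #include <linux/atomic.h>   /* pulls in asm/atomic.h */

    /*
     * Before this patch, the header's internal ATOMIC_OP helper was
     * still defined at this point, so a local macro of the same name
     * would draw a "macro redefined" diagnostic (and, if compilation
     * continued, silently replace the header's version). With the
     * #undef in place, the name is free for local use.
     */
    #define ATOMIC_OP(op)       do_local_thing(op)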