
[11/82] arm64: atomics: lse: Silence intentional wrapping addition

Message ID: 20240123002814.1396804-11-keescook@chromium.org (mailing list archive)
State: Changes Requested
Series: overflow: Refactor open-coded arithmetic wrap-around

Commit Message

Kees Cook Jan. 23, 2024, 12:26 a.m. UTC
Annotate atomic_add_return() and atomic_sub_return() to avoid signed
overflow instrumentation. They are expected to wrap around.

Cc: Will Deacon <will@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Kees Cook <keescook@chromium.org>
---
 arch/arm64/include/asm/atomic_lse.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
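
For context, the annotation used here marks whole functions whose signed arithmetic is allowed to wrap, so the signed-overflow sanitizer skips them. A minimal sketch of what a __signed_wrap-style annotation could expand to, assuming a compiler that understands the no_sanitize attribute (the macro body below is illustrative and is not quoted from the series):

  #ifdef __has_attribute
  # if __has_attribute(no_sanitize)
  /* Skip UBSAN's signed-integer-overflow check for the annotated function. */
  #  define __signed_wrap	__attribute__((no_sanitize("signed-integer-overflow")))
  # endif
  #endif
  #ifndef __signed_wrap
  # define __signed_wrap	/* nothing to silence without the attribute */
  #endif

  /* Hypothetical example: the addition may wrap without tripping UBSAN. */
  static inline __signed_wrap int wrapping_bump(int counter, int delta)
  {
  	return counter + delta;	/* intentional two's-complement wrap-around */
  }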

Comments

Mark Rutland Jan. 23, 2024, 9:53 a.m. UTC | #1
On Mon, Jan 22, 2024 at 04:26:46PM -0800, Kees Cook wrote:
> Annotate atomic_add_return() and atomic_sub_return() to avoid signed
> overflow instrumentation. They are expected to wrap around.
> 
> Cc: Will Deacon <will@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Boqun Feng <boqun.feng@gmail.com>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: linux-arm-kernel@lists.infradead.org
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
>  arch/arm64/include/asm/atomic_lse.h | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
> index 87f568a94e55..30572458d702 100644
> --- a/arch/arm64/include/asm/atomic_lse.h
> +++ b/arch/arm64/include/asm/atomic_lse.h
> @@ -79,13 +79,13 @@ ATOMIC_FETCH_OP_SUB(        )
>  #undef ATOMIC_FETCH_OP_SUB
>  
>  #define ATOMIC_OP_ADD_SUB_RETURN(name)					\
> -static __always_inline int						\
> +static __always_inline __signed_wrap int				\
>  __lse_atomic_add_return##name(int i, atomic_t *v)			\
>  {									\
>  	return __lse_atomic_fetch_add##name(i, v) + i;			\
>  }									\

I'd strongly prefer using add_wrap() rather than annotating the function, i.e.
make this:

  static __always_inline int						\
  __lse_atomic_add_return##name(int i, atomic_t *v)			\
  {									\
  	return add_wrap(__lse_atomic_fetch_add##name(i, v), i);		\
  }									\

Likewise for the other instances below.

With that, this looks fine to me.

Mark.

>  									\
> -static __always_inline int						\
> +static __always_inline __signed_wrap int				\
>  __lse_atomic_sub_return##name(int i, atomic_t *v)			\
>  {									\
>  	return __lse_atomic_fetch_sub(i, v) - i;			\
> @@ -186,13 +186,13 @@ ATOMIC64_FETCH_OP_SUB(        )
>  #undef ATOMIC64_FETCH_OP_SUB
>  
>  #define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
> -static __always_inline long						\
> +static __always_inline __signed_wrap long				\
>  __lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
>  {									\
>  	return __lse_atomic64_fetch_add##name(i, v) + i;		\
>  }									\
>  									\
> -static __always_inline long						\
> +static __always_inline __signed_wrap long				\
>  __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
>  {									\
>  	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
> -- 
> 2.34.1
>
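
Mark's preference scopes the exemption to the single expression that is allowed to wrap, leaving the rest of the function instrumented. A hedged sketch of what an add_wrap()-style helper could look like for int, assuming semantics built on __builtin_add_overflow() (the helper name and the definition actually used by the series are not shown in this message):

  /*
   * Illustrative only: add with explicitly wrapping semantics.
   * __builtin_add_overflow() stores the wrapped sum through the third
   * argument and returns whether the infinite-precision result would
   * have overflowed; that return value is deliberately ignored here.
   */
  static inline int add_wrap_sketch(int a, int b)
  {
  	int sum;

  	(void)__builtin_add_overflow(a, b, &sum);
  	return sum;
  }

Used as in Mark's snippet, __lse_atomic_add_return##name() itself would remain fully instrumented and only the intentional wrap-around would be exempted, rather than annotating the whole function with __signed_wrap.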

Patch

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 87f568a94e55..30572458d702 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -79,13 +79,13 @@  ATOMIC_FETCH_OP_SUB(        )
 #undef ATOMIC_FETCH_OP_SUB
 
 #define ATOMIC_OP_ADD_SUB_RETURN(name)					\
-static __always_inline int						\
+static __always_inline __signed_wrap int				\
 __lse_atomic_add_return##name(int i, atomic_t *v)			\
 {									\
 	return __lse_atomic_fetch_add##name(i, v) + i;			\
 }									\
 									\
-static __always_inline int						\
+static __always_inline __signed_wrap int				\
 __lse_atomic_sub_return##name(int i, atomic_t *v)			\
 {									\
 	return __lse_atomic_fetch_sub(i, v) - i;			\
@@ -186,13 +186,13 @@  ATOMIC64_FETCH_OP_SUB(        )
 #undef ATOMIC64_FETCH_OP_SUB
 
 #define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
-static __always_inline long						\
+static __always_inline __signed_wrap long				\
 __lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
 {									\
 	return __lse_atomic64_fetch_add##name(i, v) + i;		\
 }									\
 									\
-static __always_inline long						\
+static __always_inline __signed_wrap long				\
 __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
 {									\
 	return __lse_atomic64_fetch_sub##name(i, v) - i;		\