
[RFC,v2,12/13] x86: implementation for HARDENED_ATOMIC

Message ID 1476959131-6153-13-git-send-email-elena.reshetova@intel.com (mailing list archive)
State New, archived

Commit Message

Reshetova, Elena Oct. 20, 2016, 10:25 a.m. UTC
This adds the x86-specific code needed to support the
HARDENED_ATOMIC feature. When an overflow is detected
in an atomic_t or atomic_long_t, the counter is
decremented back by one (to keep it at INT_MAX or
LONG_MAX) and the issue is reported using BUG().
The side effect is that the counter cannot wrap, in
both legitimate and non-legitimate cases.
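
The pattern, applied throughout the patch, is a post-operation overflow
check that undoes the operation and raises the overflow trap (int $4);
the traps.c change below routes that trap (X86_TRAP_OF) to
hardened_atomic_overflow(). For example, atomic_add() becomes:

static __always_inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0\n"

#ifdef CONFIG_HARDENED_ATOMIC
		     /* on signed overflow: undo the add and raise #OF */
		     "jno 0f\n"
		     LOCK_PREFIX "subl %1,%0\n"
		     "int $4\n0:\n"
		     _ASM_EXTABLE(0b, 0b)
#endif

		     : "+m" (v->counter)
		     : "ir" (i));
}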

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: David Windsor <dwindsor@gmail.com>
---
 arch/x86/Kconfig                   |   1 +
 arch/x86/include/asm/atomic.h      | 323 +++++++++++++++++++++++++++++++++++--
 arch/x86/include/asm/atomic64_32.h | 201 ++++++++++++++++++++++-
 arch/x86/include/asm/atomic64_64.h | 228 +++++++++++++++++++++++++-
 arch/x86/include/asm/bitops.h      |   8 +-
 arch/x86/include/asm/cmpxchg.h     |  39 +++++
 arch/x86/include/asm/local.h       |  89 +++++++++-
 arch/x86/include/asm/preempt.h     |   2 +-
 arch/x86/include/asm/rmwcc.h       |  82 ++++++++--
 arch/x86/include/asm/rwsem.h       |  50 ++++++
 arch/x86/kernel/traps.c            |   4 +
 arch/x86/lib/atomic64_386_32.S     | 135 ++++++++++++++++
 arch/x86/lib/atomic64_cx8_32.S     |  78 ++++++++-
 13 files changed, 1194 insertions(+), 46 deletions(-)

Comments

AKASHI Takahiro Oct. 26, 2016, 5:06 a.m. UTC | #1
Elena,

On Thu, Oct 20, 2016 at 01:25:30PM +0300, Elena Reshetova wrote:
> This adds x86-specific code in order to support
> HARDENED_ATOMIC feature. When overflow is detected
> in atomic_t or atomic_long_t types, the counter is
> decremented back by one (to keep it at INT_MAX or

That's fine, but

> LONG_MAX) and issue is reported using BUG().
> The side effect is that in both legitimate and
> non-legitimate cases a counter cannot wrap.
> 
> Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
> Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
> Signed-off-by: David Windsor <dwindsor@gmail.com>
> ---
>  arch/x86/Kconfig                   |   1 +
>  arch/x86/include/asm/atomic.h      | 323 +++++++++++++++++++++++++++++++++++--
>  arch/x86/include/asm/atomic64_32.h | 201 ++++++++++++++++++++++-
>  arch/x86/include/asm/atomic64_64.h | 228 +++++++++++++++++++++++++-
>  arch/x86/include/asm/bitops.h      |   8 +-
>  arch/x86/include/asm/cmpxchg.h     |  39 +++++
>  arch/x86/include/asm/local.h       |  89 +++++++++-
>  arch/x86/include/asm/preempt.h     |   2 +-
>  arch/x86/include/asm/rmwcc.h       |  82 ++++++++--
>  arch/x86/include/asm/rwsem.h       |  50 ++++++
>  arch/x86/kernel/traps.c            |   4 +
>  arch/x86/lib/atomic64_386_32.S     | 135 ++++++++++++++++
>  arch/x86/lib/atomic64_cx8_32.S     |  78 ++++++++-
>  13 files changed, 1194 insertions(+), 46 deletions(-)
> 
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 402eee4..6c36184 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -79,6 +79,7 @@ config X86
>  	select HAVE_AOUT			if X86_32
>  	select HAVE_ARCH_AUDITSYSCALL
>  	select HAVE_ARCH_HARDENED_USERCOPY
> +	select HAVE_ARCH_HARDENED_ATOMIC
>  	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
>  	select HAVE_ARCH_JUMP_LABEL
>  	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
> diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
> index 14635c5..4a35c9b 100644
> --- a/arch/x86/include/asm/atomic.h
> +++ b/arch/x86/include/asm/atomic.h
> @@ -27,6 +27,17 @@ static __always_inline int atomic_read(const atomic_t *v)
>  }
>  
>  /**
> + * atomic_read_wrap - read atomic variable
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically reads the value of @v.
> + */
> +static __always_inline int atomic_read_wrap(const atomic_wrap_t *v)
> +{
> +	return ACCESS_ONCE((v)->counter);
> +}
> +
> +/**
>   * atomic_set - set atomic variable
>   * @v: pointer of type atomic_t
>   * @i: required value
> @@ -39,6 +50,18 @@ static __always_inline void atomic_set(atomic_t *v, int i)
>  }
>  
>  /**
> + * atomic_set_wrap - set atomic variable
> + * @v: pointer of type atomic_wrap_t
> + * @i: required value
> + *
> + * Atomically sets the value of @v to @i.
> + */
> +static __always_inline void atomic_set_wrap(atomic_wrap_t *v, int i)
> +{
> +	v->counter = i;
> +}
> +
> +/**
>   * atomic_add - add integer to atomic variable
>   * @i: integer value to add
>   * @v: pointer of type atomic_t
> @@ -47,12 +70,55 @@ static __always_inline void atomic_set(atomic_t *v, int i)
>   */
>  static __always_inline void atomic_add(int i, atomic_t *v)
>  {
> -	asm volatile(LOCK_PREFIX "addl %1,%0"
> +	asm volatile(LOCK_PREFIX "addl %1,%0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "subl %1,%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
>  		     : "+m" (v->counter)
>  		     : "ir" (i));
>  }
>  
>  /**
> + * atomic_add_wrap - add integer to atomic variable
> + * @i: integer value to add
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically adds @i to @v.
> + */
> +static __always_inline void atomic_add_wrap(int i, atomic_wrap_t *v)
> +{
> +	asm volatile(LOCK_PREFIX "addl %1,%0\n"
> +		     : "+m" (v->counter)
> +		     : "ir" (i));
> +}
> +
> +/**
> + * atomic_add_and_test - add value from variable and test result
> + * @i: integer value to add
> + * @v: pointer of type atomic_t
> + *
> + * Atomically adds @i from @v and returns
> + * true if the result is zero, or false for all
> + * other cases.
> + */
> +static __always_inline bool atomic_add_and_test(int i, atomic_t *v)
> +{
> +	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
> +}
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static __always_inline bool atomic_add_and_test_wrap(int i, atomic_wrap_t *v)
> +{
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", e);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
> +/**
>   * atomic_sub - subtract integer from atomic variable
>   * @i: integer value to subtract
>   * @v: pointer of type atomic_t
> @@ -61,7 +127,29 @@ static __always_inline void atomic_add(int i, atomic_t *v)
>   */
>  static __always_inline void atomic_sub(int i, atomic_t *v)
>  {
> -	asm volatile(LOCK_PREFIX "subl %1,%0"
> +	asm volatile(LOCK_PREFIX "subl %1,%0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "addl %1,%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +		     : "+m" (v->counter)
> +		     : "ir" (i));
> +}
> +
> +/**
> + * atomic_sub_wrap - subtract integer from atomic variable
> + * @i: integer value to subtract
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically subtracts @i from @v.
> + */
> +static __always_inline void atomic_sub_wrap(int i, atomic_wrap_t *v)
> +{
> +	asm volatile(LOCK_PREFIX "subl %1,%0\n"
>  		     : "+m" (v->counter)
>  		     : "ir" (i));
>  }
> @@ -77,7 +165,21 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
>   */
>  static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
> +	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", e);
> +}
> +
> +/**
> + * atomic_sub_and_test_wrap - subtract value from variable and test result
> + * @i: integer value to subtract
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically subtracts @i from @v and returns
> + * true if the result is zero, or false for all
> + * other cases.
> + */
> +static __always_inline bool atomic_sub_and_test_wrap(int i, atomic_wrap_t *v)
> +{
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
>  }
>  
>  /**
> @@ -88,7 +190,27 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
>   */
>  static __always_inline void atomic_inc(atomic_t *v)
>  {
> -	asm volatile(LOCK_PREFIX "incl %0"
> +	asm volatile(LOCK_PREFIX "incl %0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "decl %0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +		     : "+m" (v->counter));
> +}
> +
> +/**
> + * atomic_inc_wrap - increment atomic variable
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically increments @v by 1.
> + */
> +static __always_inline void atomic_inc_wrap(atomic_wrap_t *v)
> +{
> +	asm volatile(LOCK_PREFIX "incl %0\n"
>  		     : "+m" (v->counter));
>  }
>  
> @@ -100,7 +222,27 @@ static __always_inline void atomic_inc(atomic_t *v)
>   */
>  static __always_inline void atomic_dec(atomic_t *v)
>  {
> -	asm volatile(LOCK_PREFIX "decl %0"
> +	asm volatile(LOCK_PREFIX "decl %0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "incl %0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +		     : "+m" (v->counter));
> +}
> +
> +/**
> + * atomic_dec_wrap - decrement atomic variable
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically decrements @v by 1.
> + */
> +static __always_inline void atomic_dec_wrap(atomic_wrap_t *v)
> +{
> +	asm volatile(LOCK_PREFIX "decl %0\n"
>  		     : "+m" (v->counter));
>  }
>  
> @@ -114,9 +256,16 @@ static __always_inline void atomic_dec(atomic_t *v)
>   */
>  static __always_inline bool atomic_dec_and_test(atomic_t *v)
>  {
> -	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
> +	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", e);
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static __always_inline bool atomic_dec_and_test_wrap(atomic_wrap_t *v)
> +{
> +	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "decl", v->counter, "%0", e);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic_inc_and_test - increment and test
>   * @v: pointer of type atomic_t
> @@ -127,7 +276,20 @@ static __always_inline bool atomic_dec_and_test(atomic_t *v)
>   */
>  static __always_inline bool atomic_inc_and_test(atomic_t *v)
>  {
> -	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
> +	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", e);
> +}
> +
> +/**
> + * atomic_inc_and_test_wrap - increment and test
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically increments @v by 1
> + * and returns true if the result is zero, or false for all
> + * other cases.
> + */
> +static __always_inline int atomic_inc_and_test_wrap(atomic_wrap_t *v)
> +{
> +	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "incl", v->counter, "%0", e);
>  }
>  
>  /**
> @@ -141,9 +303,16 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
>   */
>  static __always_inline bool atomic_add_negative(int i, atomic_t *v)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
> +	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", s);
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static __always_inline bool atomic_add_negative_wrap(int i, atomic_wrap_t *v)
> +{
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic_add_return - add integer and return
>   * @i: integer value to add
> @@ -153,6 +322,18 @@ static __always_inline bool atomic_add_negative(int i, atomic_t *v)
>   */
>  static __always_inline int atomic_add_return(int i, atomic_t *v)
>  {
> +	return i + xadd_check_overflow(&v->counter, i);
> +}

On overflow, should this function still return i + v->counter?
(The caller would die anyway, though.)

> +
> +/**
> + * atomic_add_return_wrap - add integer and return
> + * @i: integer value to add
> + * @v: pointer of type atomic_wrap_t
> + *
> + * Atomically adds @i to @v and returns @i + @v
> + */
> +static __always_inline int atomic_add_return_wrap(int i, atomic_wrap_t *v)
> +{
>  	return i + xadd(&v->counter, i);
>  }
>  
> @@ -168,8 +349,26 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
>  	return atomic_add_return(-i, v);
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static __always_inline int atomic_sub_return_wrap(int i, atomic_wrap_t *v)
> +{
> +	return atomic_add_return_wrap(-i, v);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  #define atomic_inc_return(v)  (atomic_add_return(1, v))
> +static __always_inline int atomic_inc_return_wrap(atomic_wrap_t *v)
> +{
> +	return atomic_add_return_wrap(1, v);
> +}
> +
>  #define atomic_dec_return(v)  (atomic_sub_return(1, v))
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static __always_inline int atomic_dec_return_wrap(atomic_wrap_t *v)
> +{
> +	return atomic_sub_return_wrap(1, v);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
>  
>  static __always_inline int atomic_fetch_add(int i, atomic_t *v)
>  {

and atomic_fetch_add/sub() should do

        return xadd_check_overflow(&v->counter, (+/-)i);
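
That is, presumably something along the lines of the following, mirroring
the existing xadd()-based definitions:

	static __always_inline int atomic_fetch_add(int i, atomic_t *v)
	{
		return xadd_check_overflow(&v->counter, i);
	}

	static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
	{
		return xadd_check_overflow(&v->counter, -i);
	}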

> @@ -186,11 +385,21 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
>  	return cmpxchg(&v->counter, old, new);
>  }
>  
> +static __always_inline int atomic_cmpxchg_wrap(atomic_wrap_t *v, int old, int new)
> +{
> +	return cmpxchg(&v->counter, old, new);
> +}
> +
>  static inline int atomic_xchg(atomic_t *v, int new)
>  {
>  	return xchg(&v->counter, new);
>  }
>  
> +static inline int atomic_xchg_wrap(atomic_wrap_t *v, int new)
> +{
> +	return xchg(&v->counter, new);
> +}
> +
>  #define ATOMIC_OP(op)							\
>  static inline void atomic_##op(int i, atomic_t *v)			\
>  {									\
> @@ -236,12 +445,25 @@ ATOMIC_OPS(xor, ^)
>   */
>  static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
>  {
> -	int c, old;
> +	int c, old, new;
>  	c = atomic_read(v);
>  	for (;;) {
>  		if (unlikely(c == (u)))
>  			break;
> -		old = atomic_cmpxchg((v), c, c + (a));
> +
> +		asm volatile("addl %2,%0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +			     "jno 0f\n"
> +			     "subl %2,%0\n"
> +			     "int $4\n0:\n"
> +			     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +			     : "=r" (new)
> +			     : "0" (c), "ir" (a));
> +
> +		old = atomic_cmpxchg((v), c, new);
>  		if (likely(old == c))
>  			break;
>  		c = old;
> @@ -250,6 +472,87 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
>  }
>  
>  /**
> + * __atomic_add_unless__wrap - add unless the number is already a given value
> + * @v: pointer of type atomic_wrap_t
> + * @a: the amount to add to v...
> + * @u: ...unless v is equal to u.
> + *
> + * Atomically adds @a to @v, so long as @v was not already @u.
> + * Returns the old value of @v.
> + */
> +static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
> +						    int a, int u)
> +{
> +	int c, old, new;
> +	c = atomic_read_wrap(v);
> +	for (;;) {
> +		if (unlikely(c == (u)))
> +			break;
> +
> +		asm volatile("addl %2,%0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +			     "jno 0f\n"
> +			     "subl %2,%0\n"
> +			     "int $4\n0:\n"
> +			     _ASM_EXTABLE(0b, 0b)
> +#endif

Is this a mistake? We don't need a check here.
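
Presumably the _wrap variant wants the plain, unchecked add here, e.g. just:

		asm volatile("addl %2,%0\n"
			     : "=r" (new)
			     : "0" (c), "ir" (a));

or simply old = atomic_cmpxchg_wrap((v), c, c + (a)); the way the pre-patch
__atomic_add_unless() used atomic_cmpxchg().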

> +
> +			     : "=r" (new)
> +			     : "0" (c), "ir" (a));
> +
> +		old = atomic_cmpxchg_wrap((v), c, new);
> +		if (likely(old == c))
> +			break;
> +		c = old;
> +	}
> +	return c;
> +}
> +
> +/**
> + * atomic_inc_not_zero_hint - increment if not null
> + * @v: pointer of type atomic_t
> + * @hint: probable value of the atomic before the increment
> + *
> + * This version of atomic_inc_not_zero() gives a hint of probable
> + * value of the atomic. This helps processor to not read the memory
> + * before doing the atomic read/modify/write cycle, lowering
> + * number of bus transactions on some arches.
> + *
> + * Returns: 0 if increment was not done, 1 otherwise.
> + */
> +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
> +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
> +{
> +	int val, c = hint, new;
> +
> +	/* sanity test, should be removed by compiler if hint is a constant */
> +	if (!hint)
> +		return __atomic_add_unless(v, 1, 0);
> +
> +	do {
> +		asm volatile("incl %0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +			     "jno 0f\n"
> +			     "decl %0\n"
> +			     "int $4\n0:\n"
> +			     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +			     : "=r" (new)
> +			     : "0" (c));
> +
> +		val = atomic_cmpxchg((v), c, new);
> +		if (val == c)
> +			return 1;
> +		c = val;
> +	} while (c);
> +
> +	return 0;
> +}
> +
> +/**
>   * atomic_inc_short - increment of a short integer
>   * @v: pointer to type int
>   *
> diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
> index 71d7705..7c88320 100644
> --- a/arch/x86/include/asm/atomic64_32.h
> +++ b/arch/x86/include/asm/atomic64_32.h
> @@ -11,6 +11,14 @@ typedef struct {
>  	u64 __aligned(8) counter;
>  } atomic64_t;
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +typedef struct {
> +	u64 __aligned(8) counter;
> +} atomic64_wrap_t;
> +#else
> +typedef atomic64_t atomic64_wrap_t;
> +#endif
> +
>  #define ATOMIC64_INIT(val)	{ (val) }
>  
>  #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
> @@ -36,21 +44,31 @@ typedef struct {
>  	ATOMIC64_DECL_ONE(sym##_386)
>  
>  ATOMIC64_DECL_ONE(add_386);
> +ATOMIC64_DECL_ONE(add_wrap_386);
>  ATOMIC64_DECL_ONE(sub_386);
> +ATOMIC64_DECL_ONE(sub_wrap_386);
>  ATOMIC64_DECL_ONE(inc_386);
> +ATOMIC64_DECL_ONE(inc_wrap_386);
>  ATOMIC64_DECL_ONE(dec_386);
> +ATOMIC64_DECL_ONE(dec_wrap_386);
>  #endif
>  
>  #define alternative_atomic64(f, out, in...) \
>  	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
>  
>  ATOMIC64_DECL(read);
> +ATOMIC64_DECL(read_wrap);
>  ATOMIC64_DECL(set);
> +ATOMIC64_DECL(set_wrap);
>  ATOMIC64_DECL(xchg);
>  ATOMIC64_DECL(add_return);
> +ATOMIC64_DECL(add_return_wrap);
>  ATOMIC64_DECL(sub_return);
> +ATOMIC64_DECL(sub_return_wrap);
>  ATOMIC64_DECL(inc_return);
> +ATOMIC64_DECL(inc_return_wrap);
>  ATOMIC64_DECL(dec_return);
> +ATOMIC64_DECL(dec_return_wrap);
>  ATOMIC64_DECL(dec_if_positive);
>  ATOMIC64_DECL(inc_not_zero);
>  ATOMIC64_DECL(add_unless);
> @@ -76,6 +94,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
>  }
>  
>  /**
> + * atomic64_cmpxchg_wrap - cmpxchg atomic64 variable
> + * @p: pointer to type atomic64_wrap_t
> + * @o: expected value
> + * @n: new value
> + *
> + * Atomically sets @v to @n if it was equal to @o and returns
> + * the old value.
> + */
> +
> +static inline long long atomic64_cmpxchg_wrap(atomic64_wrap_t *v, long long o, long long n)
> +{
> +	return cmpxchg64(&v->counter, o, n);
> +}
> +
> +/**
>   * atomic64_xchg - xchg atomic64 variable
>   * @v: pointer to type atomic64_t
>   * @n: value to assign
> @@ -95,6 +128,25 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
>  }
>  
>  /**
> + * atomic64_xchg_wrap - xchg atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + * @n: value to assign
> + *
> + * Atomically xchgs the value of @v to @n and returns
> + * the old value.
> + */
> +static inline long long atomic64_xchg_wrap(atomic64_wrap_t *v, long long n)
> +{
> +	long long o;
> +	unsigned high = (unsigned)(n >> 32);
> +	unsigned low = (unsigned)n;
> +	alternative_atomic64(xchg, "=&A" (o),
> +			     "S" (v), "b" (low), "c" (high)
> +			     : "memory");
> +	return o;
> +}
> +
> +/**
>   * atomic64_set - set atomic64 variable
>   * @v: pointer to type atomic64_t
>   * @i: value to assign
> @@ -111,6 +163,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
>  }
>  
>  /**
> + * atomic64_set_wrap - set atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + * @n: value to assign
> + *
> + * Atomically sets the value of @v to @n.
> + */
> +static inline void atomic64_set_wrap(atomic64_wrap_t *v, long long i)
> +{
> +	unsigned high = (unsigned)(i >> 32);
> +	unsigned low = (unsigned)i;
> +	alternative_atomic64(set, /* no output */,
> +			     "S" (v), "b" (low), "c" (high)
> +			     : "eax", "edx", "memory");
> +}
> +
> +/**
>   * atomic64_read - read atomic64 variable
>   * @v: pointer to type atomic64_t
>   *
> @@ -121,7 +189,20 @@ static inline long long atomic64_read(const atomic64_t *v)
>  	long long r;
>  	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
>  	return r;
> - }
> +}
> +
> +/**
> + * atomic64_read_wrap - read atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically reads the value of @v and returns it.
> + */
> +static inline long long atomic64_read_wrap(const atomic64_wrap_t *v)
> +{
> +	long long r;
> +	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
> +	return r;
> +}
>  
>  /**
>   * atomic64_add_return - add and return
> @@ -138,6 +219,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
>  	return i;
>  }
>  
> +/**
> + * atomic64_add_return_wrap - add and return
> + * @i: integer value to add
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically adds @i to @v and returns @i + *@v
> + */
> +static inline long long atomic64_add_return_wrap(long long i, atomic64_wrap_t *v)
> +{
> +	alternative_atomic64(add_return_wrap,
> +			     ASM_OUTPUT2("+A" (i), "+c" (v)),
> +			     ASM_NO_INPUT_CLOBBER("memory"));
> +	return i;
> +}
> +
>  /*
>   * Other variants with different arithmetic operators:
>   */
> @@ -149,6 +245,14 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
>  	return i;
>  }
>  
> +static inline long long atomic64_sub_return_wrap(long long i, atomic64_wrap_t *v)
> +{
> +	alternative_atomic64(sub_return,

sub_return_wrap?
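
I.e., presumably:

	alternative_atomic64(sub_return_wrap,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));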

Thanks,
-Takahiro AKASHI

> +			     ASM_OUTPUT2("+A" (i), "+c" (v)),
> +			     ASM_NO_INPUT_CLOBBER("memory"));
> +	return i;
> +}
> +
>  static inline long long atomic64_inc_return(atomic64_t *v)
>  {
>  	long long a;
> @@ -157,6 +261,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
>  	return a;
>  }
>  
> +static inline long long atomic64_inc_return_wrap(atomic64_wrap_t *v)
> +{
> +	long long a;
> +	alternative_atomic64(inc_return_wrap, "=&A" (a),
> +			     "S" (v) : "memory", "ecx");
> +	return a;
> +}
> +
>  static inline long long atomic64_dec_return(atomic64_t *v)
>  {
>  	long long a;
> @@ -165,6 +277,16 @@ static inline long long atomic64_dec_return(atomic64_t *v)
>  	return a;
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline long long atomic64_dec_return_wrap(atomic64_wrap_t *v)
> +{
> +	long long a;
> +	alternative_atomic64(dec_return_wrap, "=&A" (a),
> +			     "S" (v) : "memory", "ecx");
> +	return a;
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic64_add - add integer to atomic64 variable
>   * @i: integer value to add
> @@ -181,6 +303,42 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
>  }
>  
>  /**
> + * atomic64_add_wrap - add integer to atomic64 variable
> + * @i: integer value to add
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically adds @i to @v.
> + */
> +static inline long long atomic64_add_wrap(long long i, atomic64_wrap_t *v)
> +{
> +	__alternative_atomic64(add_wrap, add_return_wrap,
> +			       ASM_OUTPUT2("+A" (i), "+c" (v)),
> +			       ASM_NO_INPUT_CLOBBER("memory"));
> +	return i;
> +}
> +
> +/**
> + * atomic64_add_and_test - add value from variable and test result
> + * @i: integer value to add
> + * @v: pointer to type atomic64_t
> + *
> + * Atomically subtracts @i from @v and returns
> + * true if the result is zero, or false for all
> + * other cases.
> + */
> +static inline int atomic64_add_and_test(long long i, atomic64_t *v)
> +{
> +	return atomic64_add_return(i, v) == 0;
> +}
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline int atomic64_add_and_test_wrap(long long i, atomic64_wrap_t *v)
> +{
> +	return atomic64_add_return_wrap(i, v) == 0;
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
> +/**
>   * atomic64_sub - subtract the atomic64 variable
>   * @i: integer value to subtract
>   * @v: pointer to type atomic64_t
> @@ -209,6 +367,13 @@ static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
>  	return atomic64_sub_return(i, v) == 0;
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline int atomic64_sub_and_test_wrap(long long i, atomic64_wrap_t *v)
> +{
> +	return atomic64_sub_return_wrap(i, v) == 0;
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic64_inc - increment atomic64 variable
>   * @v: pointer to type atomic64_t
> @@ -222,6 +387,18 @@ static inline void atomic64_inc(atomic64_t *v)
>  }
>  
>  /**
> + * atomic64_inc_wrap - increment atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically increments @v by 1.
> + */
> +static inline void atomic64_inc_wrap(atomic64_wrap_t *v)
> +{
> +	__alternative_atomic64(inc_wrap, inc_return_wrap, /* no output */,
> +			       "S" (v) : "memory", "eax", "ecx", "edx");
> +}
> +
> +/**
>   * atomic64_dec - decrement atomic64 variable
>   * @v: pointer to type atomic64_t
>   *
> @@ -246,6 +423,13 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
>  	return atomic64_dec_return(v) == 0;
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline int atomic64_dec_and_test_wrap(atomic64_wrap_t *v)
> +{
> +	return atomic64_dec_return_wrap(v) == 0;
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic64_inc_and_test - increment and test
>   * @v: pointer to type atomic64_t
> @@ -259,6 +443,13 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
>  	return atomic64_inc_return(v) == 0;
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline int atomic64_inc_and_test_wrap(atomic64_wrap_t *v)
> +{
> +	return atomic64_inc_return_wrap(v) == 0;
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic64_add_negative - add and test if negative
>   * @i: integer value to add
> @@ -273,6 +464,13 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
>  	return atomic64_add_return(i, v) < 0;
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline int atomic64_add_negative_wrap(long long i, atomic64_wrap_t *v)
> +{
> +	return atomic64_add_return_wrap(i, v) < 0;
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  /**
>   * atomic64_add_unless - add unless the number is a given value
>   * @v: pointer of type atomic64_t
> @@ -292,7 +490,6 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
>  	return (int)a;
>  }
>  
> -
>  static inline int atomic64_inc_not_zero(atomic64_t *v)
>  {
>  	int r;
> diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
> index 89ed2f6..d8d3a3d 100644
> --- a/arch/x86/include/asm/atomic64_64.h
> +++ b/arch/x86/include/asm/atomic64_64.h
> @@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
>  }
>  
>  /**
> + * atomic64_read_wrap - read atomic64 variable
> + * @v: pointer of type atomic64_wrap_t
> + *
> + * Atomically reads the value of @v.
> + * Doesn't imply a read memory barrier.
> + */
> +static inline long atomic64_read_wrap(const atomic64_wrap_t *v)
> +{
> +	return ACCESS_ONCE((v)->counter);
> +}
> +
> +/**
>   * atomic64_set - set atomic64 variable
>   * @v: pointer to type atomic64_t
>   * @i: required value
> @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
>  }
>  
>  /**
> + * atomic64_set_wrap - set atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + * @i: required value
> + *
> + * Atomically sets the value of @v to @i.
> + */
> +static inline void atomic64_set_wrap(atomic64_wrap_t *v, long i)
> +{
> +	v->counter = i;
> +}
> +
> +/**
>   * atomic64_add - add integer to atomic64 variable
>   * @i: integer value to add
>   * @v: pointer to type atomic64_t
> @@ -42,12 +66,55 @@ static inline void atomic64_set(atomic64_t *v, long i)
>   */
>  static __always_inline void atomic64_add(long i, atomic64_t *v)
>  {
> +	asm volatile(LOCK_PREFIX "addq %1,%0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "subq %1,%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +		     : "=m" (v->counter)
> +		     : "er" (i), "m" (v->counter));
> +}
> +
> +/**
> + * atomic64_add_wrap - add integer to atomic64 variable
> + * @i: integer value to add
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically adds @i to @v.
> + */
> +static __always_inline void atomic64_add_wrap(long i, atomic64_wrap_t *v)
> +{
>  	asm volatile(LOCK_PREFIX "addq %1,%0"
>  		     : "=m" (v->counter)
>  		     : "er" (i), "m" (v->counter));
>  }
>  
>  /**
> + * atomic64_add_and_test - add value from variable and test result
> + * @i: integer value to add
> + * @v: pointer to type atomic64_t
> + *
> + * Atomically adds @i from @v and returns
> + * true if the result is zero, or false for all
> + * other cases.
> + */
> +static inline bool atomic64_add_and_test(long i, atomic64_t *v)
> +{
> +	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
> +}
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline bool atomic64_add_and_test_wrap(long i, atomic64_wrap_t *v)
> +{
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addq", v->counter, "er", i, "%0", e);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
> +/**
>   * atomic64_sub - subtract the atomic64 variable
>   * @i: integer value to subtract
>   * @v: pointer to type atomic64_t
> @@ -56,6 +123,26 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
>   */
>  static inline void atomic64_sub(long i, atomic64_t *v)
>  {
> +	asm volatile(LOCK_PREFIX "subq %1,%0\n"
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "addq %1,%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "=m" (v->counter)
> +		     : "er" (i), "m" (v->counter));
> +}
> +
> +/**
> + * atomic64_sub_wrap - subtract the atomic64 variable
> + * @i: integer value to subtract
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically subtracts @i from @v.
> + */
> +static inline void atomic64_sub_wrap(long i, atomic64_wrap_t *v)
> +{
>  	asm volatile(LOCK_PREFIX "subq %1,%0"
>  		     : "=m" (v->counter)
>  		     : "er" (i), "m" (v->counter));
> @@ -72,7 +159,21 @@ static inline void atomic64_sub(long i, atomic64_t *v)
>   */
>  static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
> +	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", e);
> +}
> +
> +/**
> + * atomic64_sub_and_test_wrap - subtract value from variable and test result
> + * @i: integer value to subtract
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically subtracts @i from @v and returns
> + * true if the result is zero, or false for all
> + * other cases.
> + */
> +static inline bool atomic64_sub_and_test_wrap(long i, atomic64_wrap_t *v)
> +{
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
>  }
>  
>  /**
> @@ -83,6 +184,26 @@ static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
>   */
>  static __always_inline void atomic64_inc(atomic64_t *v)
>  {
> +	asm volatile(LOCK_PREFIX "incq %0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "decq %0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "=m" (v->counter)
> +		     : "m" (v->counter));
> +}
> +
> +/**
> + * atomic64_inc_wrap - increment atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically increments @v by 1.
> + */
> +static __always_inline void atomic64_inc_wrap(atomic64_wrap_t *v)
> +{
>  	asm volatile(LOCK_PREFIX "incq %0"
>  		     : "=m" (v->counter)
>  		     : "m" (v->counter));
> @@ -96,6 +217,26 @@ static __always_inline void atomic64_inc(atomic64_t *v)
>   */
>  static __always_inline void atomic64_dec(atomic64_t *v)
>  {
> +	asm volatile(LOCK_PREFIX "decq %0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX "incq %0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "=m" (v->counter)
> +		     : "m" (v->counter));
> +}
> +
> +/**
> + * atomic64_dec_wrap - decrement atomic64 variable
> + * @v: pointer to type atomic64_wrap_t
> + *
> + * Atomically decrements @v by 1.
> + */
> +static __always_inline void atomic64_dec_wrap(atomic64_wrap_t *v)
> +{
>  	asm volatile(LOCK_PREFIX "decq %0"
>  		     : "=m" (v->counter)
>  		     : "m" (v->counter));
> @@ -111,8 +252,15 @@ static __always_inline void atomic64_dec(atomic64_t *v)
>   */
>  static inline bool atomic64_dec_and_test(atomic64_t *v)
>  {
> -	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
> +	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", e);
> +}
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline bool atomic64_dec_and_test_wrap(atomic64_wrap_t *v)
> +{
> +	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "decq", v->counter, "%0", e);
>  }
> +#endif /* CONFIG_HARDENED_ATOMIC */
>  
>  /**
>   * atomic64_inc_and_test - increment and test
> @@ -124,8 +272,15 @@ static inline bool atomic64_dec_and_test(atomic64_t *v)
>   */
>  static inline bool atomic64_inc_and_test(atomic64_t *v)
>  {
> -	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
> +	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", e);
> +}
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline bool atomic64_inc_and_test_wrap(atomic64_wrap_t *v)
> +{
> +	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "incq", v->counter, "%0", e);
>  }
> +#endif /* CONFIG_HARDENED_ATOMIC */
>  
>  /**
>   * atomic64_add_negative - add and test if negative
> @@ -138,8 +293,15 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
>   */
>  static inline bool atomic64_add_negative(long i, atomic64_t *v)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
> +	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", s);
> +}
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline bool atomic64_add_negative_wrap(long i, atomic64_wrap_t *v)
> +{
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
>  }
> +#endif /* CONFIG_HARDENED_ATOMIC */
>  
>  /**
>   * atomic64_add_return - add and return
> @@ -150,6 +312,11 @@ static inline bool atomic64_add_negative(long i, atomic64_t *v)
>   */
>  static __always_inline long atomic64_add_return(long i, atomic64_t *v)
>  {
> +	return i + xadd_check_overflow(&v->counter, i);
> +}
> +
> +static __always_inline long atomic64_add_return_wrap(long i, atomic64_wrap_t *v)
> +{
>  	return i + xadd(&v->counter, i);
>  }
>  
> @@ -158,6 +325,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
>  	return atomic64_add_return(-i, v);
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline long atomic64_sub_return_wrap(long i, atomic64_wrap_t *v)
> +{
> +	return atomic64_add_return_wrap(-i, v);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  static inline long atomic64_fetch_add(long i, atomic64_t *v)
>  {
>  	return xadd(&v->counter, i);
> @@ -171,16 +345,29 @@ static inline long atomic64_fetch_sub(long i, atomic64_t *v)
>  #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
>  #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
>  
> +#define atomic64_inc_return_wrap(v)  (atomic64_add_return_wrap(1, (v)))
> +#define atomic64_dec_return_wrap(v)  (atomic64_sub_return_wrap(1, (v)))
> +
>  static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
>  {
>  	return cmpxchg(&v->counter, old, new);
>  }
>  
> +static inline long atomic64_cmpxchg_wrap(atomic64_wrap_t *v, long old, long new)
> +{
> +	return cmpxchg(&v->counter, old, new);
> +}
> +
>  static inline long atomic64_xchg(atomic64_t *v, long new)
>  {
>  	return xchg(&v->counter, new);
>  }
>  
> +static inline long atomic64_xchg_wrap(atomic64_wrap_t *v, long new)
> +{
> +	return xchg(&v->counter, new);
> +}
> +
>  /**
>   * atomic64_add_unless - add unless the number is a given value
>   * @v: pointer of type atomic64_t
> @@ -192,11 +379,21 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
>   */
>  static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
>  {
> -	long c, old;
> +	long c, old, new;
>  	c = atomic64_read(v);
>  	for (;;) {
>  		if (unlikely(c == (u)))
>  			break;
> +		asm volatile("add %2,%0\n"
> +#ifdef CONFIG_HARDENED_ATOMIC
> +			     "jno 0f\n"
> +			     "sub %2,%0\n"
> +			     "int $4\n0:\n"
> +			     _ASM_EXTABLE(0b, 0b)
> +#endif
> +			     : "=r" (new)
> +			     : "0" (c), "ir" (a));
> +
>  		old = atomic64_cmpxchg((v), c, c + (a));
>  		if (likely(old == c))
>  			break;
> @@ -205,6 +402,27 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
>  	return c != (u);
>  }
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +static inline bool atomic64_add_unless_wrap(atomic64_wrap_t *v, long a, long u)
> +{
> +	long c, old, new;
> +	c = atomic64_read_wrap(v);
> +	for (;;) {
> +		if (unlikely(c == (u)))
> +			break;
> +		asm volatile("add %2,%0\n"
> +			     : "=r" (new)
> +			     : "0" (c), "ir" (a));
> +
> +		old = atomic64_cmpxchg_wrap((v), c, c + (a));
> +		if (likely(old == c))
> +			break;
> +		c = old;
> +	}
> +	return c != (u);
> +}
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
>  
>  /*
> diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
> index 68557f52..e25eb0d 100644
> --- a/arch/x86/include/asm/bitops.h
> +++ b/arch/x86/include/asm/bitops.h
> @@ -50,7 +50,7 @@
>   * a mask operation on a byte.
>   */
>  #define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
> -#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
> +#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
>  #define CONST_MASK(nr)			(1 << ((nr) & 7))
>  
>  /**
> @@ -203,7 +203,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
>   */
>  static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
>  }
>  
>  /**
> @@ -249,7 +249,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
>   */
>  static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
>  }
>  
>  /**
> @@ -302,7 +302,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
>   */
>  static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
>  {
> -	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
> +	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
>  }
>  
>  static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
> diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
> index 9733361..b83f612 100644
> --- a/arch/x86/include/asm/cmpxchg.h
> +++ b/arch/x86/include/asm/cmpxchg.h
> @@ -13,10 +13,14 @@ extern void __xchg_wrong_size(void)
>  	__compiletime_error("Bad argument size for xchg");
>  extern void __cmpxchg_wrong_size(void)
>  	__compiletime_error("Bad argument size for cmpxchg");
> +extern void __xadd_check_overflow_wrong_size(void)
> +	__compiletime_error("Bad argument size for xadd_check_overflow");
>  extern void __xadd_wrong_size(void)
>  	__compiletime_error("Bad argument size for xadd");
>  extern void __add_wrong_size(void)
>  	__compiletime_error("Bad argument size for add");
> +extern void __add_check_overflow_wrong_size(void)
> +	__compiletime_error("Bad argument size for add_check_overflow");
>  
>  /*
>   * Constants for operation sizes. On 32-bit, the 64-bit size it set to
> @@ -68,6 +72,38 @@ extern void __add_wrong_size(void)
>  		__ret;							\
>  	})
>  
> +#ifdef CONFIG_HARDENED_ATOMIC
> +#define __xchg_op_check_overflow(ptr, arg, op, lock)			\
> +	({								\
> +	        __typeof__ (*(ptr)) __ret = (arg);			\
> +		switch (sizeof(*(ptr))) {				\
> +		case __X86_CASE_L:					\
> +			asm volatile (lock #op "l %0, %1\n"		\
> +				      "jno 0f\n"			\
> +				      "mov %0,%1\n"			\
> +				      "int $4\n0:\n"			\
> +				      _ASM_EXTABLE(0b, 0b)		\
> +				      : "+r" (__ret), "+m" (*(ptr))	\
> +				      : : "memory", "cc");		\
> +			break;						\
> +		case __X86_CASE_Q:					\
> +			asm volatile (lock #op "q %q0, %1\n"		\
> +				      "jno 0f\n"			\
> +				      "mov %0,%1\n"			\
> +				      "int $4\n0:\n"			\
> +				      _ASM_EXTABLE(0b, 0b)		\
> +				      : "+r" (__ret), "+m" (*(ptr))	\
> +				      : : "memory", "cc");		\
> +			break;						\
> +		default:						\
> +			__ ## op ## _check_overflow_wrong_size();	\
> +		}							\
> +		__ret;							\
> +	})
> +#else
> +#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
> +#endif
> +
>  /*
>   * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
>   * Since this is generally used to protect other memory information, we
> @@ -166,6 +202,9 @@ extern void __add_wrong_size(void)
>  #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
>  #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
>  
> +#define __xadd_check_overflow(ptr, inc, lock)	__xchg_op_check_overflow((ptr), (inc), xadd, lock)
> +#define xadd_check_overflow(ptr, inc)		__xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
> +
>  #define __add(ptr, inc, lock)						\
>  	({								\
>  	        __typeof__ (*(ptr)) __ret = (inc);			\
> diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
> index 7511978..46cfaf0 100644
> --- a/arch/x86/include/asm/local.h
> +++ b/arch/x86/include/asm/local.h
> @@ -10,25 +10,69 @@ typedef struct {
>  	atomic_long_t a;
>  } local_t;
>  
> +typedef struct {
> +	atomic_long_wrap_t a;
> +} local_wrap_t;
> +
>  #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
>  
>  #define local_read(l)	atomic_long_read(&(l)->a)
> +#define local_read_wrap(l)	atomic_long_read_wrap(&(l)->a)
>  #define local_set(l, i)	atomic_long_set(&(l)->a, (i))
> +#define local_set_wrap(l, i)	atomic_long_set_wrap(&(l)->a, (i))
>  
>  static inline void local_inc(local_t *l)
>  {
> +	asm volatile(_ASM_INC "%0\n"
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     _ASM_DEC "%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "+m" (l->a.counter));
> +}
> +
> +static inline void local_inc_wrap(local_wrap_t *l)
> +{
>  	asm volatile(_ASM_INC "%0"
>  		     : "+m" (l->a.counter));
>  }
>  
>  static inline void local_dec(local_t *l)
>  {
> +	asm volatile(_ASM_DEC "%0\n"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     _ASM_INC "%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "+m" (l->a.counter));
> +}
> +
> +static inline void local_dec_wrap(local_wrap_t *l)
> +{
>  	asm volatile(_ASM_DEC "%0"
>  		     : "+m" (l->a.counter));
>  }
>  
>  static inline void local_add(long i, local_t *l)
>  {
> +	asm volatile(_ASM_ADD "%1,%0\n"
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     _ASM_SUB "%1,%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "+m" (l->a.counter)
> +		     : "ir" (i));
> +}
> +
> +static inline void local_add_wrap(long i, local_wrap_t *l)
> +{
>  	asm volatile(_ASM_ADD "%1,%0"
>  		     : "+m" (l->a.counter)
>  		     : "ir" (i));
> @@ -36,6 +80,19 @@ static inline void local_add(long i, local_t *l)
>  
>  static inline void local_sub(long i, local_t *l)
>  {
> +	asm volatile(_ASM_SUB "%1,%0\n"
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     _ASM_ADD "%1,%0\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "+m" (l->a.counter)
> +		     : "ir" (i));
> +}
> +
> +static inline void local_sub_wrap(long i, local_wrap_t *l)
> +{
>  	asm volatile(_ASM_SUB "%1,%0"
>  		     : "+m" (l->a.counter)
>  		     : "ir" (i));
> @@ -52,7 +109,7 @@ static inline void local_sub(long i, local_t *l)
>   */
>  static inline bool local_sub_and_test(long i, local_t *l)
>  {
> -	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
> +	GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", e);
>  }
>  
>  /**
> @@ -65,7 +122,7 @@ static inline bool local_sub_and_test(long i, local_t *l)
>   */
>  static inline bool local_dec_and_test(local_t *l)
>  {
> -	GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
> +	GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", e);
>  }
>  
>  /**
> @@ -78,7 +135,7 @@ static inline bool local_dec_and_test(local_t *l)
>   */
>  static inline bool local_inc_and_test(local_t *l)
>  {
> -	GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
> +	GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", e);
>  }
>  
>  /**
> @@ -92,7 +149,7 @@ static inline bool local_inc_and_test(local_t *l)
>   */
>  static inline bool local_add_negative(long i, local_t *l)
>  {
> -	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
> +	GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", s);
>  }
>  
>  /**
> @@ -105,6 +162,28 @@ static inline bool local_add_negative(long i, local_t *l)
>  static inline long local_add_return(long i, local_t *l)
>  {
>  	long __i = i;
> +	asm volatile(_ASM_XADD "%0, %1\n"
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     _ASM_MOV "%0,%1\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +		     : "+r" (i), "+m" (l->a.counter)
> +		     : : "memory");
> +	return i + __i;
> +}
> +
> +/**
> + * local_add_return_wrap - add and return
> + * @i: integer value to add
> + * @l: pointer to type local_wrap_t
> + *
> + * Atomically adds @i to @l and returns @i + @l
> + */
> +static inline long local_add_return_wrap(long i, local_wrap_t *l)
> +{
> +	long __i = i;
>  	asm volatile(_ASM_XADD "%0, %1;"
>  		     : "+r" (i), "+m" (l->a.counter)
>  		     : : "memory");
> @@ -121,6 +200,8 @@ static inline long local_sub_return(long i, local_t *l)
>  
>  #define local_cmpxchg(l, o, n) \
>  	(cmpxchg_local(&((l)->a.counter), (o), (n)))
> +#define local_cmpxchg_wrap(l, o, n) \
> +	(cmpxchg_local(&((l)->a.counter), (o), (n)))
>  /* Always has a lock prefix */
>  #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
>  
> diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
> index 17f2186..2fa0e84 100644
> --- a/arch/x86/include/asm/preempt.h
> +++ b/arch/x86/include/asm/preempt.h
> @@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val)
>   */
>  static __always_inline bool __preempt_count_dec_and_test(void)
>  {
> -	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
> +    GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), e);
>  }
>  
>  /*
> diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
> index 661dd30..0375d3f 100644
> --- a/arch/x86/include/asm/rmwcc.h
> +++ b/arch/x86/include/asm/rmwcc.h
> @@ -5,28 +5,80 @@
>  
>  /* Use asm goto */
>  
> -#define __GEN_RMWcc(fullop, var, cc, ...)				\
> +#ifdef CONFIG_HARDENED_ATOMIC
> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
>  do {									\
> -	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"		\
> +	asm_volatile_goto (fullop					\
> +			";jno 0f\n"					\
> +			fullantiop					\
> +			";int $4\n0:\n"					\
> +			_ASM_EXTABLE(0b, 0b)				\
> +			 ";j" #cc " %l[cc_label]"			\
>  			: : "m" (var), ## __VA_ARGS__ 			\
>  			: "memory" : cc_label);				\
>  	return 0;							\
>  cc_label:								\
>  	return 1;							\
>  } while (0)
> +#else
> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
> +do {									\
> +	asm_volatile_goto (fullop ";j" #cc " %l[cc_label]"		\
> +			: : "m" (var), ## __VA_ARGS__ 			\
> +			: "memory" : cc_label);				\
> +	return 0;							\
> +cc_label:								\
> +	return 1;							\
> +} while (0)
> +#endif
>  
> -#define GEN_UNARY_RMWcc(op, var, arg0, cc) 				\
> -	__GEN_RMWcc(op " " arg0, var, cc)
> -
> -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
> -	__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
> +#define __GEN_RMWcc_wrap(fullop, var, cc, ...)do {									\
> +	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"		\
> +			: : "m" (var), ## __VA_ARGS__ 			\
> +			: "memory" : cc_label);				\
> +	return 0;							\
> +cc_label:								\
> +	return 1;							\
> +} while (0)
>  
> +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) 			\
> +	__GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
> +#define GEN_UNARY_RMWcc_wrap(op, var, arg0, cc) 			\
> +	__GEN_RMWcc_wrap(op " " arg0, var, cc)
> +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)		\
> +	__GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
> +#define GEN_BINARY_RMWcc_wrap(op, var, vcon, val, arg0, cc)	\
> +	__GEN_RMWcc_wrap(op " %1, " arg0, var, cc, vcon (val))
>  #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
>  
>  /* Use flags output or a set instruction */
>  
> -#define __GEN_RMWcc(fullop, var, cc, ...)				\
> +#ifdef CONFIG_HARDENED_ATOMIC
> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
>  do {									\
> +	char c;								\
> +	asm volatile (fullop 						\
> +			";jno 0f\n"					\
> +			fullantiop					\
> +			";int $4\n0:\n"					\
> +			_ASM_EXTABLE(0b, 0b)				\
> +			";" CC_SET(cc)				\
> +			: "+m" (var), CC_OUT(cc) (c)			\
> +			: __VA_ARGS__ : "memory");			\
> +	return c != 0;							\
> +} while (0)
> +#else
> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
> +do {									\
> +	char c;								\
> +	asm volatile (fullop ";" CC_SET(cc)				\
> +			: "+m" (var), CC_OUT(cc) (c)			\
> +			: __VA_ARGS__ : "memory");			\
> +	return c != 0;							\
> +} while (0)
> +#endif
> +
> +#define __GEN_RMWcc_wrap(fullop, var, cc, ...)do {									\
>  	bool c;								\
>  	asm volatile (fullop ";" CC_SET(cc)				\
>  			: "+m" (var), CC_OUT(cc) (c)			\
> @@ -34,12 +86,14 @@ do {									\
>  	return c;							\
>  } while (0)
>  
> -#define GEN_UNARY_RMWcc(op, var, arg0, cc)				\
> -	__GEN_RMWcc(op " " arg0, var, cc)
> -
> -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
> -	__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
> -
> +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)			\
> +	__GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
> +#define GEN_UNARY_RMWcc_wrap(op, var, arg0, cc)			\
> +	__GEN_RMWcc_wrap(op " " arg0, var, cc)
> +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)		\
> +	__GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
> +#define GEN_BINARY_RMWcc_wrap(op, var, vcon, val, arg0, cc)	\
> +	__GEN_RMWcc_wrap(op " %2, " arg0, var, cc, vcon (val))
>  #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
>  
>  #endif /* _ASM_X86_RMWcc */
> diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
> index 3d33a71..4d3f8a5 100644
> --- a/arch/x86/include/asm/rwsem.h
> +++ b/arch/x86/include/asm/rwsem.h
> @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
>  {
>  	asm volatile("# beginning down_read\n\t"
>  		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX _ASM_DEC "(%1)\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
>  		     /* adds 0x00000001 */
>  		     "  jns        1f\n"
>  		     "  call call_rwsem_down_read_failed\n"
> @@ -85,6 +93,14 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
>  		     "1:\n\t"
>  		     "  mov          %1,%2\n\t"
>  		     "  add          %3,%2\n\t"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     "sub %3,%2\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
>  		     "  jle	     2f\n\t"
>  		     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
>  		     "  jnz	     1b\n\t"
> @@ -99,12 +115,22 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
>  /*
>   * lock for writing
>   */
> +#ifdef CONFIG_HARDENED_ATOMIC
> +#define ____down_write_undo \
> +		     "jno 0f\n"\
> +		     "mov %1,(%2)\n"\
> +		     "int $4\n0:\n"\
> +		     _ASM_EXTABLE(0b, 0b)
> +#else
> +#define ____down_write_undo
> +#endif
>  #define ____down_write(sem, slow_path)			\
>  ({							\
>  	long tmp;					\
>  	struct rw_semaphore* ret;			\
>  	asm volatile("# beginning down_write\n\t"	\
>  		     LOCK_PREFIX "  xadd      %1,(%3)\n\t"	\
> +		     ____down_write_undo		\
>  		     /* adds 0xffff0001, returns the old value */ \
>  		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
>  		     /* was the active mask 0 before? */\
> @@ -166,6 +192,14 @@ static inline void __up_read(struct rw_semaphore *sem)
>  	long tmp;
>  	asm volatile("# beginning __up_read\n\t"
>  		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     "mov %1,(%2)\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
>  		     /* subtracts 1, returns the old value */
>  		     "  jns        1f\n\t"
>  		     "  call call_rwsem_wake\n" /* expects old value in %edx */
> @@ -184,6 +218,14 @@ static inline void __up_write(struct rw_semaphore *sem)
>  	long tmp;
>  	asm volatile("# beginning __up_write\n\t"
>  		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     "mov %1,(%2)\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
>  		     /* subtracts 0xffff0001, returns the old value */
>  		     "  jns        1f\n\t"
>  		     "  call call_rwsem_wake\n" /* expects old value in %edx */
> @@ -201,6 +243,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
>  {
>  	asm volatile("# beginning __downgrade_write\n\t"
>  		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
> +
> +#ifdef CONFIG_HARDENED_ATOMIC
> +		     "jno 0f\n"
> +		     LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
> +		     "int $4\n0:\n"
> +		     _ASM_EXTABLE(0b, 0b)
> +#endif
> +
>  		     /*
>  		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
>  		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
> index bd4e3d4..d67a914 100644
> --- a/arch/x86/kernel/traps.c
> +++ b/arch/x86/kernel/traps.c
> @@ -191,6 +191,10 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
>  			tsk->thread.trap_nr = trapnr;
>  			die(str, regs, error_code);
>  		}
> +
> +		if (trapnr == X86_TRAP_OF)
> +			hardened_atomic_overflow(regs);
> +
>  		return 0;
>  	}
>  
> diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
> index 9b0ca8f..0e8a888 100644
> --- a/arch/x86/lib/atomic64_386_32.S
> +++ b/arch/x86/lib/atomic64_386_32.S
> @@ -45,6 +45,10 @@ BEGIN(read)
>  	movl  (v), %eax
>  	movl 4(v), %edx
>  RET_ENDP
> +BEGIN(read_wrap)
> +	movl  (v), %eax
> +	movl 4(v), %edx
> +RET_ENDP
>  #undef v
>  
>  #define v %esi
> @@ -52,6 +56,10 @@ BEGIN(set)
>  	movl %ebx,  (v)
>  	movl %ecx, 4(v)
>  RET_ENDP
> +BEGIN(set_wrap)
> +	movl %ebx,  (v)
> +	movl %ecx, 4(v)
> +RET_ENDP
>  #undef v
>  
>  #define v  %esi
> @@ -67,6 +75,18 @@ RET_ENDP
>  BEGIN(add)
>  	addl %eax,  (v)
>  	adcl %edx, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	jno 0f
> +	subl %eax,  (v)
> +	sbbl %edx, 4(v)
> +	int $4
> +0:
> +	_ASM_EXTABLE(0b, 0b)
> +#endif
> +RET_ENDP
> +BEGIN(add_wrap)
> +	addl %eax,  (v)
> +	adcl %edx, 4(v)
>  RET_ENDP
>  #undef v
>  
> @@ -74,6 +94,20 @@ RET_ENDP
>  BEGIN(add_return)
>  	addl  (v), %eax
>  	adcl 4(v), %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
> +	movl %eax,  (v)
> +	movl %edx, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +2:
> +#endif
> +RET_ENDP
> +BEGIN(add_return_wrap)
> +	addl  (v), %eax
> +	adcl 4(v), %edx
>  	movl %eax,  (v)
>  	movl %edx, 4(v)
>  RET_ENDP
> @@ -83,6 +117,18 @@ RET_ENDP
>  BEGIN(sub)
>  	subl %eax,  (v)
>  	sbbl %edx, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	jno 0f
> +	addl %eax,  (v)
> +	adcl %edx, 4(v)
> +	int $4
> +0:
> +	_ASM_EXTABLE(0b, 0b)
> +#endif
> +RET_ENDP
> +BEGIN(sub_wrap)
> +	subl %eax,  (v)
> +	sbbl %edx, 4(v)
>  RET_ENDP
>  #undef v
>  
> @@ -93,6 +139,23 @@ BEGIN(sub_return)
>  	sbbl $0, %edx
>  	addl  (v), %eax
>  	adcl 4(v), %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
> +	movl %eax,  (v)
> +	movl %edx, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +2:
> +#endif
> +RET_ENDP
> +BEGIN(sub_return_wrap)
> +	negl %edx
> +	negl %eax
> +	sbbl $0, %edx
> +	addl  (v), %eax
> +	adcl 4(v), %edx
>  	movl %eax,  (v)
>  	movl %edx, 4(v)
>  RET_ENDP
> @@ -102,6 +165,19 @@ RET_ENDP
>  BEGIN(inc)
>  	addl $1,  (v)
>  	adcl $0, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	jno 0f
> +	subl $1,  (v)
> +	sbbl $0, 4(v)
> +	int $4
> +0:
> +	_ASM_EXTABLE(0b, 0b)
> +#endif
> +
> +RET_ENDP
> +BEGIN(inc_wrap)
> +	addl $1,  (v)
> +	adcl $0, 4(v)
>  RET_ENDP
>  #undef v
>  
> @@ -111,6 +187,22 @@ BEGIN(inc_return)
>  	movl 4(v), %edx
>  	addl $1, %eax
>  	adcl $0, %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
> +	movl %eax,  (v)
> +	movl %edx, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +2:
> +#endif
> +RET_ENDP
> +BEGIN(inc_return_wrap)
> +	movl  (v), %eax
> +	movl 4(v), %edx
> +	addl $1, %eax
> +	adcl $0, %edx
>  	movl %eax,  (v)
>  	movl %edx, 4(v)
>  RET_ENDP
> @@ -120,6 +212,18 @@ RET_ENDP
>  BEGIN(dec)
>  	subl $1,  (v)
>  	sbbl $0, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	jno 0f
> +	addl $1,  (v)
> +	adcl $0, 4(v)
> +	int $4
> +0:
> +	_ASM_EXTABLE(0b, 0b)
> +#endif
> +RET_ENDP
> +BEGIN(dec_wrap)
> +	subl $1,  (v)
> +	sbbl $0, 4(v)
>  RET_ENDP
>  #undef v
>  
> @@ -129,6 +233,22 @@ BEGIN(dec_return)
>  	movl 4(v), %edx
>  	subl $1, %eax
>  	sbbl $0, %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
> +	movl %eax,  (v)
> +	movl %edx, 4(v)
> +#ifdef CONFIG_HARDENED_ATOMIC
> +2:
> +#endif
> +RET_ENDP
> +BEGIN(dec_return_wrap)
> +	movl  (v), %eax
> +	movl 4(v), %edx
> +	subl $1, %eax
> +	sbbl $0, %edx
>  	movl %eax,  (v)
>  	movl %edx, 4(v)
>  RET_ENDP
> @@ -140,6 +260,11 @@ BEGIN(add_unless)
>  	adcl %edx, %edi
>  	addl  (v), %eax
>  	adcl 4(v), %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
>  	cmpl %eax, %ecx
>  	je 3f
>  1:
> @@ -165,6 +290,11 @@ BEGIN(inc_not_zero)
>  1:
>  	addl $1, %eax
>  	adcl $0, %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
>  	movl %eax,  (v)
>  	movl %edx, 4(v)
>  	movl $1, %eax
> @@ -183,6 +313,11 @@ BEGIN(dec_if_positive)
>  	movl 4(v), %edx
>  	subl $1, %eax
>  	sbbl $0, %edx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 1f)
> +#endif
>  	js 1f
>  	movl %eax,  (v)
>  	movl %edx, 4(v)
> diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
> index db3ae854..5bd864e 100644
> --- a/arch/x86/lib/atomic64_cx8_32.S
> +++ b/arch/x86/lib/atomic64_cx8_32.S
> @@ -22,9 +22,19 @@
>  
>  ENTRY(atomic64_read_cx8)
>  	read64 %ecx
> +	/* Pax has pax_force_retaddr here
> +	 * do we want similar? If yes, changes
> +	 * have to be made in more places below */
>  	ret
>  ENDPROC(atomic64_read_cx8)
>  
> +ENTRY(atomic64_read_wrap_cx8)
> +	read64 %ecx
> +/* do we want smth like the below line?
> + *	pax_force_retaddr */
> +	ret
> +ENDPROC(atomic64_read_wrap_cx8)
> +
>  ENTRY(atomic64_set_cx8)
>  1:
>  /* we don't need LOCK_PREFIX since aligned 64-bit writes
> @@ -35,6 +45,17 @@ ENTRY(atomic64_set_cx8)
>  	ret
>  ENDPROC(atomic64_set_cx8)
>  
> +ENTRY(atomic64_set_wrap_cx8)
> +1:
> +/* we don't need LOCK_PREFIX since aligned 64-bit writes
> + * are atomic on 586 and newer */
> +	cmpxchg8b (%esi)
> +	jne 1b
> +
> +	/* pax_force_retaddr */
> +	ret
> +ENDPROC(atomic64_set_wrap_cx8)
> +
>  ENTRY(atomic64_xchg_cx8)
>  1:
>  	LOCK_PREFIX
> @@ -44,8 +65,8 @@ ENTRY(atomic64_xchg_cx8)
>  	ret
>  ENDPROC(atomic64_xchg_cx8)
>  
> -.macro addsub_return func ins insc
> -ENTRY(atomic64_\func\()_return_cx8)
> +.macro addsub_return func ins insc wrap=""
> +ENTRY(atomic64_\func\()_return\wrap\()_cx8)
>  	pushl %ebp
>  	pushl %ebx
>  	pushl %esi
> @@ -61,6 +82,13 @@ ENTRY(atomic64_\func\()_return_cx8)
>  	movl %edx, %ecx
>  	\ins\()l %esi, %ebx
>  	\insc\()l %edi, %ecx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +.ifb \wrap
> +	into
> +2:
> +	_ASM_EXTABLE(2b, 3f)
> +.endif
> +#endif
>  	LOCK_PREFIX
>  	cmpxchg8b (%ebp)
>  	jne 1b
> @@ -68,19 +96,27 @@ ENTRY(atomic64_\func\()_return_cx8)
>  10:
>  	movl %ebx, %eax
>  	movl %ecx, %edx
> +
> +.ifb \wrap
> +#ifdef CONFIG_HARDENED_ATOMIC
> +3:
> +#endif
> +.endif
>  	popl %edi
>  	popl %esi
>  	popl %ebx
>  	popl %ebp
>  	ret
> -ENDPROC(atomic64_\func\()_return_cx8)
> +ENDPROC(atomic64_\func\()_return\wrap\()_cx8)
>  .endm
>  
>  addsub_return add add adc
>  addsub_return sub sub sbb
> +addsub_return add add adc _wrap
> +addsub_return sub sub sbb _wrap
>  
> -.macro incdec_return func ins insc
> -ENTRY(atomic64_\func\()_return_cx8)
> +.macro incdec_return func ins insc wrap=""
> +ENTRY(atomic64_\func\()_return\wrap\()_cx8)
>  	pushl %ebx
>  
>  	read64 %esi
> @@ -89,6 +125,13 @@ ENTRY(atomic64_\func\()_return_cx8)
>  	movl %edx, %ecx
>  	\ins\()l $1, %ebx
>  	\insc\()l $0, %ecx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +.ifb \wrap
> +	into
> +2:
> +	_ASM_EXTABLE(2b, 3f)
> +.endif
> +#endif
>  	LOCK_PREFIX
>  	cmpxchg8b (%esi)
>  	jne 1b
> @@ -96,13 +139,21 @@ ENTRY(atomic64_\func\()_return_cx8)
>  10:
>  	movl %ebx, %eax
>  	movl %ecx, %edx
> +
> +.ifb \wrap
> +#ifdef CONFIG_HARDENED_ATOMIC
> +3:
> +#endif
> +.endif
>  	popl %ebx
>  	ret
> -ENDPROC(atomic64_\func\()_return_cx8)
> +ENDPROC(atomic64_\func\()_return\wrap\()_cx8)
>  .endm
>  
>  incdec_return inc add adc
>  incdec_return dec sub sbb
> +incdec_return inc add adc _wrap
> +incdec_return dec sub sbb _wrap
>  
>  ENTRY(atomic64_dec_if_positive_cx8)
>  	pushl %ebx
> @@ -113,6 +164,11 @@ ENTRY(atomic64_dec_if_positive_cx8)
>  	movl %edx, %ecx
>  	subl $1, %ebx
>  	sbb $0, %ecx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 2f)
> +#endif
>  	js 2f
>  	LOCK_PREFIX
>  	cmpxchg8b (%esi)
> @@ -144,6 +200,11 @@ ENTRY(atomic64_add_unless_cx8)
>  	movl %edx, %ecx
>  	addl %ebp, %ebx
>  	adcl %edi, %ecx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 3f)
> +#endif
>  	LOCK_PREFIX
>  	cmpxchg8b (%esi)
>  	jne 1b
> @@ -173,6 +234,11 @@ ENTRY(atomic64_inc_not_zero_cx8)
>  	xorl %ecx, %ecx
>  	addl $1, %ebx
>  	adcl %edx, %ecx
> +#ifdef CONFIG_HARDENED_ATOMIC
> +	into
> +1234:
> +	_ASM_EXTABLE(1234b, 3f)
> +#endif
>  	LOCK_PREFIX
>  	cmpxchg8b (%esi)
>  	jne 1b
> -- 
> 2.7.4
>
David Windsor Oct. 26, 2016, 6:55 a.m. UTC | #2
On Wed, Oct 26, 2016 at 1:06 AM, AKASHI Takahiro
<takahiro.akashi@linaro.org> wrote:
> Elena,
>
> On Thu, Oct 20, 2016 at 01:25:30PM +0300, Elena Reshetova wrote:
>> This adds x86-specific code in order to support
>> HARDENED_ATOMIC feature. When overflow is detected
>> in atomic_t or atomic_long_t types, the counter is
>> decremented back by one (to keep it at INT_MAX or
>
> That's fine, but
>
>> LONG_MAX) and issue is reported using BUG().
>> The side effect is that in both legitimate and
>> non-legitimate cases a counter cannot wrap.
>>
>> Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
>> Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
>> Signed-off-by: David Windsor <dwindsor@gmail.com>
>> ---
>>  arch/x86/Kconfig                   |   1 +
>>  arch/x86/include/asm/atomic.h      | 323 +++++++++++++++++++++++++++++++++++--
>>  arch/x86/include/asm/atomic64_32.h | 201 ++++++++++++++++++++++-
>>  arch/x86/include/asm/atomic64_64.h | 228 +++++++++++++++++++++++++-
>>  arch/x86/include/asm/bitops.h      |   8 +-
>>  arch/x86/include/asm/cmpxchg.h     |  39 +++++
>>  arch/x86/include/asm/local.h       |  89 +++++++++-
>>  arch/x86/include/asm/preempt.h     |   2 +-
>>  arch/x86/include/asm/rmwcc.h       |  82 ++++++++--
>>  arch/x86/include/asm/rwsem.h       |  50 ++++++
>>  arch/x86/kernel/traps.c            |   4 +
>>  arch/x86/lib/atomic64_386_32.S     | 135 ++++++++++++++++
>>  arch/x86/lib/atomic64_cx8_32.S     |  78 ++++++++-
>>  13 files changed, 1194 insertions(+), 46 deletions(-)
>>
>> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
>> index 402eee4..6c36184 100644
>> --- a/arch/x86/Kconfig
>> +++ b/arch/x86/Kconfig
>> @@ -79,6 +79,7 @@ config X86
>>       select HAVE_AOUT                        if X86_32
>>       select HAVE_ARCH_AUDITSYSCALL
>>       select HAVE_ARCH_HARDENED_USERCOPY
>> +     select HAVE_ARCH_HARDENED_ATOMIC
>>       select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
>>       select HAVE_ARCH_JUMP_LABEL
>>       select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
>> diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
>> index 14635c5..4a35c9b 100644
>> --- a/arch/x86/include/asm/atomic.h
>> +++ b/arch/x86/include/asm/atomic.h
>> @@ -27,6 +27,17 @@ static __always_inline int atomic_read(const atomic_t *v)
>>  }
>>
>>  /**
>> + * atomic_read_wrap - read atomic variable
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically reads the value of @v.
>> + */
>> +static __always_inline int atomic_read_wrap(const atomic_wrap_t *v)
>> +{
>> +     return ACCESS_ONCE((v)->counter);
>> +}
>> +
>> +/**
>>   * atomic_set - set atomic variable
>>   * @v: pointer of type atomic_t
>>   * @i: required value
>> @@ -39,6 +50,18 @@ static __always_inline void atomic_set(atomic_t *v, int i)
>>  }
>>
>>  /**
>> + * atomic_set_wrap - set atomic variable
>> + * @v: pointer of type atomic_wrap_t
>> + * @i: required value
>> + *
>> + * Atomically sets the value of @v to @i.
>> + */
>> +static __always_inline void atomic_set_wrap(atomic_wrap_t *v, int i)
>> +{
>> +     v->counter = i;
>> +}
>> +
>> +/**
>>   * atomic_add - add integer to atomic variable
>>   * @i: integer value to add
>>   * @v: pointer of type atomic_t
>> @@ -47,12 +70,55 @@ static __always_inline void atomic_set(atomic_t *v, int i)
>>   */
>>  static __always_inline void atomic_add(int i, atomic_t *v)
>>  {
>> -     asm volatile(LOCK_PREFIX "addl %1,%0"
>> +     asm volatile(LOCK_PREFIX "addl %1,%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "subl %1,%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>>                    : "+m" (v->counter)
>>                    : "ir" (i));
>>  }
>>
>>  /**
>> + * atomic_add_wrap - add integer to atomic variable
>> + * @i: integer value to add
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically adds @i to @v.
>> + */
>> +static __always_inline void atomic_add_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     asm volatile(LOCK_PREFIX "addl %1,%0\n"
>> +                  : "+m" (v->counter)
>> +                  : "ir" (i));
>> +}
>> +
>> +/**
>> + * atomic_add_and_test - add value from variable and test result
>> + * @i: integer value to add
>> + * @v: pointer of type atomic_t
>> + *
>> + * Atomically adds @i from @v and returns
>> + * true if the result is zero, or false for all
>> + * other cases.
>> + */
>> +static __always_inline bool atomic_add_and_test(int i, atomic_t *v)
>> +{
>> +     GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
>> +}
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline bool atomic_add_and_test_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", e);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>> +/**
>>   * atomic_sub - subtract integer from atomic variable
>>   * @i: integer value to subtract
>>   * @v: pointer of type atomic_t
>> @@ -61,7 +127,29 @@ static __always_inline void atomic_add(int i, atomic_t *v)
>>   */
>>  static __always_inline void atomic_sub(int i, atomic_t *v)
>>  {
>> -     asm volatile(LOCK_PREFIX "subl %1,%0"
>> +     asm volatile(LOCK_PREFIX "subl %1,%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "addl %1,%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +                  : "+m" (v->counter)
>> +                  : "ir" (i));
>> +}
>> +
>> +/**
>> + * atomic_sub_wrap - subtract integer from atomic variable
>> + * @i: integer value to subtract
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically subtracts @i from @v.
>> + */
>> +static __always_inline void atomic_sub_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     asm volatile(LOCK_PREFIX "subl %1,%0\n"
>>                    : "+m" (v->counter)
>>                    : "ir" (i));
>>  }
>> @@ -77,7 +165,21 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
>>   */
>>  static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
>> +     GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", e);
>> +}
>> +
>> +/**
>> + * atomic_sub_and_test_wrap - subtract value from variable and test result
>> + * @i: integer value to subtract
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically subtracts @i from @v and returns
>> + * true if the result is zero, or false for all
>> + * other cases.
>> + */
>> +static __always_inline bool atomic_sub_and_test_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
>>  }
>>
>>  /**
>> @@ -88,7 +190,27 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
>>   */
>>  static __always_inline void atomic_inc(atomic_t *v)
>>  {
>> -     asm volatile(LOCK_PREFIX "incl %0"
>> +     asm volatile(LOCK_PREFIX "incl %0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "decl %0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +                  : "+m" (v->counter));
>> +}
>> +
>> +/**
>> + * atomic_inc_wrap - increment atomic variable
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically increments @v by 1.
>> + */
>> +static __always_inline void atomic_inc_wrap(atomic_wrap_t *v)
>> +{
>> +     asm volatile(LOCK_PREFIX "incl %0\n"
>>                    : "+m" (v->counter));
>>  }
>>
>> @@ -100,7 +222,27 @@ static __always_inline void atomic_inc(atomic_t *v)
>>   */
>>  static __always_inline void atomic_dec(atomic_t *v)
>>  {
>> -     asm volatile(LOCK_PREFIX "decl %0"
>> +     asm volatile(LOCK_PREFIX "decl %0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "incl %0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +                  : "+m" (v->counter));
>> +}
>> +
>> +/**
>> + * atomic_dec_wrap - decrement atomic variable
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically decrements @v by 1.
>> + */
>> +static __always_inline void atomic_dec_wrap(atomic_wrap_t *v)
>> +{
>> +     asm volatile(LOCK_PREFIX "decl %0\n"
>>                    : "+m" (v->counter));
>>  }
>>
>> @@ -114,9 +256,16 @@ static __always_inline void atomic_dec(atomic_t *v)
>>   */
>>  static __always_inline bool atomic_dec_and_test(atomic_t *v)
>>  {
>> -     GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
>> +     GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", e);
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline bool atomic_dec_and_test_wrap(atomic_wrap_t *v)
>> +{
>> +     GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "decl", v->counter, "%0", e);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic_inc_and_test - increment and test
>>   * @v: pointer of type atomic_t
>> @@ -127,7 +276,20 @@ static __always_inline bool atomic_dec_and_test(atomic_t *v)
>>   */
>>  static __always_inline bool atomic_inc_and_test(atomic_t *v)
>>  {
>> -     GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
>> +     GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", e);
>> +}
>> +
>> +/**
>> + * atomic_inc_and_test_wrap - increment and test
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically increments @v by 1
>> + * and returns true if the result is zero, or false for all
>> + * other cases.
>> + */
>> +static __always_inline int atomic_inc_and_test_wrap(atomic_wrap_t *v)
>> +{
>> +     GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "incl", v->counter, "%0", e);
>>  }
>>
>>  /**
>> @@ -141,9 +303,16 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
>>   */
>>  static __always_inline bool atomic_add_negative(int i, atomic_t *v)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
>> +     GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", s);
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline bool atomic_add_negative_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic_add_return - add integer and return
>>   * @i: integer value to add
>> @@ -153,6 +322,18 @@ static __always_inline bool atomic_add_negative(int i, atomic_t *v)
>>   */
>>  static __always_inline int atomic_add_return(int i, atomic_t *v)
>>  {
>> +     return i + xadd_check_overflow(&v->counter, i);
>> +}
>
> If an overflow occurs, should this function still return i + v->counter?
> (The caller would die anyway, though.)
>

Yes, because in the non-overflow case xadd_check_overflow() returns
the previous value of v->counter.  This gets added to i and returned,
which is correct and guaranteed not to result in an overflow (if it
did, the checks in xadd_check_overflow() would kill the process, as
you noted).

In the overflow case, the caller gets killed anyway: before
xadd_check_overflow() can return, do_trap() calls
hardened_atomic_overflow() which calls BUG(), so the return statement
won't finish executing.

One thing to note about the i + xadd_check_overflow() pattern: there
is a potential TOCTOU issue if i can be modified after
xadd_check_overflow() returns but before the full expression is
evaluated.  In code paths where i is shared between threads, we might
want to make the whole expression a critical section.
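
One alternative to a critical section, purely as a sketch (this is not
in the patch, and the helper name is made up), would be to snapshot the
delta once, so that the value handed to the xadd and the value added to
its return are guaranteed to be the same:

	static __always_inline int atomic_add_return_snapshot(int *shared_i,
							       atomic_t *v)
	{
		int i = READ_ONCE(*shared_i);	/* one snapshot of the delta */

		/* the locked xadd and the returned sum both use that snapshot */
		return i + xadd_check_overflow(&v->counter, i);
	}

Since i is passed by value in the functions above, this only matters
for callers that recompute the delta from shared state.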

>> +
>> +/**
>> + * atomic_add_return_wrap - add integer and return
>> + * @i: integer value to add
>> + * @v: pointer of type atomic_wrap_t
>> + *
>> + * Atomically adds @i to @v and returns @i + @v
>> + */
>> +static __always_inline int atomic_add_return_wrap(int i, atomic_wrap_t *v)
>> +{
>>       return i + xadd(&v->counter, i);
>>  }
>>
>> @@ -168,8 +349,26 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
>>       return atomic_add_return(-i, v);
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline int atomic_sub_return_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     return atomic_add_return_wrap(-i, v);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  #define atomic_inc_return(v)  (atomic_add_return(1, v))
>> +static __always_inline int atomic_inc_return_wrap(atomic_wrap_t *v)
>> +{
>> +     return atomic_add_return_wrap(1, v);
>> +}
>> +
>>  #define atomic_dec_return(v)  (atomic_sub_return(1, v))
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline int atomic_dec_return_wrap(atomic_wrap_t *v)
>> +{
>> +     return atomic_sub_return_wrap(1, v);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>>
>>  static __always_inline int atomic_fetch_add(int i, atomic_t *v)
>>  {
>
> and atomic_fetch_add/sub() should do
>
>         return xadd_check_overflow((+/-)i, v);
>
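
For reference, a minimal sketch of what that could look like, following
the xadd_check_overflow() argument order used elsewhere in this patch
(untested):

	static __always_inline int atomic_fetch_add(int i, atomic_t *v)
	{
		/* returns the old value; traps if the addition overflows */
		return xadd_check_overflow(&v->counter, i);
	}

	static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
	{
		return xadd_check_overflow(&v->counter, -i);
	}
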
>> @@ -186,11 +385,21 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
>>       return cmpxchg(&v->counter, old, new);
>>  }
>>
>> +static __always_inline int atomic_cmpxchg_wrap(atomic_wrap_t *v, int old, int new)
>> +{
>> +     return cmpxchg(&v->counter, old, new);
>> +}
>> +
>>  static inline int atomic_xchg(atomic_t *v, int new)
>>  {
>>       return xchg(&v->counter, new);
>>  }
>>
>> +static inline int atomic_xchg_wrap(atomic_wrap_t *v, int new)
>> +{
>> +     return xchg(&v->counter, new);
>> +}
>> +
>>  #define ATOMIC_OP(op)                                                        \
>>  static inline void atomic_##op(int i, atomic_t *v)                   \
>>  {                                                                    \
>> @@ -236,12 +445,25 @@ ATOMIC_OPS(xor, ^)
>>   */
>>  static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
>>  {
>> -     int c, old;
>> +     int c, old, new;
>>       c = atomic_read(v);
>>       for (;;) {
>>               if (unlikely(c == (u)))
>>                       break;
>> -             old = atomic_cmpxchg((v), c, c + (a));
>> +
>> +             asm volatile("addl %2,%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                          "jno 0f\n"
>> +                          "subl %2,%0\n"
>> +                          "int $4\n0:\n"
>> +                          _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +                          : "=r" (new)
>> +                          : "0" (c), "ir" (a));
>> +
>> +             old = atomic_cmpxchg((v), c, new);
>>               if (likely(old == c))
>>                       break;
>>               c = old;
>> @@ -250,6 +472,87 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
>>  }
>>
>>  /**
>> + * __atomic_add_unless__wrap - add unless the number is already a given value
>> + * @v: pointer of type atomic_wrap_t
>> + * @a: the amount to add to v...
>> + * @u: ...unless v is equal to u.
>> + *
>> + * Atomically adds @a to @v, so long as @v was not already @u.
>> + * Returns the old value of @v.
>> + */
>> +static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
>> +                                                 int a, int u)
>> +{
>> +     int c, old, new;
>> +     c = atomic_read_wrap(v);
>> +     for (;;) {
>> +             if (unlikely(c == (u)))
>> +                     break;
>> +
>> +             asm volatile("addl %2,%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                          "jno 0f\n"
>> +                          "subl %2,%0\n"
>> +                          "int $4\n0:\n"
>> +                          _ASM_EXTABLE(0b, 0b)
>> +#endif
>
> Is this a mistake? We don't need a check here.
>

Yes, this appears to be a mistake.
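
Presumably the wrap variant just wants the plain add with no check, the
same way the 64-bit atomic64_add_unless_wrap() below does, i.e.
something like (sketch only):

	asm volatile("addl %2,%0\n"
		     : "=r" (new)
		     : "0" (c), "ir" (a));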

>> +
>> +                          : "=r" (new)
>> +                          : "0" (c), "ir" (a));
>> +
>> +             old = atomic_cmpxchg_wrap((v), c, new);
>> +             if (likely(old == c))
>> +                     break;
>> +             c = old;
>> +     }
>> +     return c;
>> +}
>> +
>> +/**
>> ++ * atomic_inc_not_zero_hint - increment if not null
>> ++ * @v: pointer of type atomic_t
>> ++ * @hint: probable value of the atomic before the increment
>> ++ *
>> ++ * This version of atomic_inc_not_zero() gives a hint of probable
>> ++ * value of the atomic. This helps processor to not read the memory
>> ++ * before doing the atomic read/modify/write cycle, lowering
>> ++ * number of bus transactions on some arches.
>> ++ *
>> ++ * Returns: 0 if increment was not done, 1 otherwise.
>> ++ */
>> +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
>> +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
>> +{
>> +     int val, c = hint, new;
>> +
>> +     /* sanity test, should be removed by compiler if hint is a constant */
>> +     if (!hint)
>> +             return __atomic_add_unless(v, 1, 0);
>> +
>> +     do {
>> +             asm volatile("incl %0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                          "jno 0f\n"
>> +                          "decl %0\n"
>> +                          "int $4\n0:\n"
>> +                          _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +                          : "=r" (new)
>> +                          : "0" (c));
>> +
>> +             val = atomic_cmpxchg((v), c, new);
>> +             if (val == c)
>> +                     return 1;
>> +             c = val;
>> +     } while (c);
>> +
>> +     return 0;
>> +}
>> +
>> +/**
>>   * atomic_inc_short - increment of a short integer
>>   * @v: pointer to type int
>>   *
>> diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
>> index 71d7705..7c88320 100644
>> --- a/arch/x86/include/asm/atomic64_32.h
>> +++ b/arch/x86/include/asm/atomic64_32.h
>> @@ -11,6 +11,14 @@ typedef struct {
>>       u64 __aligned(8) counter;
>>  } atomic64_t;
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +typedef struct {
>> +     u64 __aligned(8) counter;
>> +} atomic64_wrap_t;
>> +#else
>> +typedef atomic64_t atomic64_wrap_t;
>> +#endif
>> +
>>  #define ATOMIC64_INIT(val)   { (val) }
>>
>>  #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
>> @@ -36,21 +44,31 @@ typedef struct {
>>       ATOMIC64_DECL_ONE(sym##_386)
>>
>>  ATOMIC64_DECL_ONE(add_386);
>> +ATOMIC64_DECL_ONE(add_wrap_386);
>>  ATOMIC64_DECL_ONE(sub_386);
>> +ATOMIC64_DECL_ONE(sub_wrap_386);
>>  ATOMIC64_DECL_ONE(inc_386);
>> +ATOMIC64_DECL_ONE(inc_wrap_386);
>>  ATOMIC64_DECL_ONE(dec_386);
>> +ATOMIC64_DECL_ONE(dec_wrap_386);
>>  #endif
>>
>>  #define alternative_atomic64(f, out, in...) \
>>       __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
>>
>>  ATOMIC64_DECL(read);
>> +ATOMIC64_DECL(read_wrap);
>>  ATOMIC64_DECL(set);
>> +ATOMIC64_DECL(set_wrap);
>>  ATOMIC64_DECL(xchg);
>>  ATOMIC64_DECL(add_return);
>> +ATOMIC64_DECL(add_return_wrap);
>>  ATOMIC64_DECL(sub_return);
>> +ATOMIC64_DECL(sub_return_wrap);
>>  ATOMIC64_DECL(inc_return);
>> +ATOMIC64_DECL(inc_return_wrap);
>>  ATOMIC64_DECL(dec_return);
>> +ATOMIC64_DECL(dec_return_wrap);
>>  ATOMIC64_DECL(dec_if_positive);
>>  ATOMIC64_DECL(inc_not_zero);
>>  ATOMIC64_DECL(add_unless);
>> @@ -76,6 +94,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
>>  }
>>
>>  /**
>> + * atomic64_cmpxchg_wrap - cmpxchg atomic64 variable
>> + * @p: pointer to type atomic64_wrap_t
>> + * @o: expected value
>> + * @n: new value
>> + *
>> + * Atomically sets @v to @n if it was equal to @o and returns
>> + * the old value.
>> + */
>> +
>> +static inline long long atomic64_cmpxchg_wrap(atomic64_wrap_t *v, long long o, long long n)
>> +{
>> +     return cmpxchg64(&v->counter, o, n);
>> +}
>> +
>> +/**
>>   * atomic64_xchg - xchg atomic64 variable
>>   * @v: pointer to type atomic64_t
>>   * @n: value to assign
>> @@ -95,6 +128,25 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
>>  }
>>
>>  /**
>> + * atomic64_xchg_wrap - xchg atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + * @n: value to assign
>> + *
>> + * Atomically xchgs the value of @v to @n and returns
>> + * the old value.
>> + */
>> +static inline long long atomic64_xchg_wrap(atomic64_wrap_t *v, long long n)
>> +{
>> +     long long o;
>> +     unsigned high = (unsigned)(n >> 32);
>> +     unsigned low = (unsigned)n;
>> +     alternative_atomic64(xchg, "=&A" (o),
>> +                          "S" (v), "b" (low), "c" (high)
>> +                          : "memory");
>> +     return o;
>> +}
>> +
>> +/**
>>   * atomic64_set - set atomic64 variable
>>   * @v: pointer to type atomic64_t
>>   * @i: value to assign
>> @@ -111,6 +163,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
>>  }
>>
>>  /**
>> + * atomic64_set_wrap - set atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + * @n: value to assign
>> + *
>> + * Atomically sets the value of @v to @n.
>> + */
>> +static inline void atomic64_set_wrap(atomic64_wrap_t *v, long long i)
>> +{
>> +     unsigned high = (unsigned)(i >> 32);
>> +     unsigned low = (unsigned)i;
>> +     alternative_atomic64(set, /* no output */,
>> +                          "S" (v), "b" (low), "c" (high)
>> +                          : "eax", "edx", "memory");
>> +}
>> +
>> +/**
>>   * atomic64_read - read atomic64 variable
>>   * @v: pointer to type atomic64_t
>>   *
>> @@ -121,7 +189,20 @@ static inline long long atomic64_read(const atomic64_t *v)
>>       long long r;
>>       alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
>>       return r;
>> - }
>> +}
>> +
>> +/**
>> + * atomic64_read_wrap - read atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically reads the value of @v and returns it.
>> + */
>> +static inline long long atomic64_read_wrap(const atomic64_wrap_t *v)
>> +{
>> +     long long r;
>> +     alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
>> +     return r;
>> +}
>>
>>  /**
>>   * atomic64_add_return - add and return
>> @@ -138,6 +219,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
>>       return i;
>>  }
>>
>> +/**
>> + * atomic64_add_return_wrap - add and return
>> + * @i: integer value to add
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically adds @i to @v and returns @i + *@v
>> + */
>> +static inline long long atomic64_add_return_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     alternative_atomic64(add_return_wrap,
>> +                          ASM_OUTPUT2("+A" (i), "+c" (v)),
>> +                          ASM_NO_INPUT_CLOBBER("memory"));
>> +     return i;
>> +}
>> +
>>  /*
>>   * Other variants with different arithmetic operators:
>>   */
>> @@ -149,6 +245,14 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
>>       return i;
>>  }
>>
>> +static inline long long atomic64_sub_return_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     alternative_atomic64(sub_return,
>
> sub_return_wrap?
>
> Thanks,
> -Takahiro AKASHI
>
>> +                          ASM_OUTPUT2("+A" (i), "+c" (v)),
>> +                          ASM_NO_INPUT_CLOBBER("memory"));
>> +     return i;
>> +}
>> +
>>  static inline long long atomic64_inc_return(atomic64_t *v)
>>  {
>>       long long a;
>> @@ -157,6 +261,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
>>       return a;
>>  }
>>
>> +static inline long long atomic64_inc_return_wrap(atomic64_wrap_t *v)
>> +{
>> +     long long a;
>> +     alternative_atomic64(inc_return_wrap, "=&A" (a),
>> +                          "S" (v) : "memory", "ecx");
>> +     return a;
>> +}
>> +
>>  static inline long long atomic64_dec_return(atomic64_t *v)
>>  {
>>       long long a;
>> @@ -165,6 +277,16 @@ static inline long long atomic64_dec_return(atomic64_t *v)
>>       return a;
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline long long atomic64_dec_return_wrap(atomic64_wrap_t *v)
>> +{
>> +     long long a;
>> +     alternative_atomic64(dec_return_wrap, "=&A" (a),
>> +                          "S" (v) : "memory", "ecx");
>> +     return a;
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic64_add - add integer to atomic64 variable
>>   * @i: integer value to add
>> @@ -181,6 +303,42 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
>>  }
>>
>>  /**
>> + * atomic64_add_wrap - add integer to atomic64 variable
>> + * @i: integer value to add
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically adds @i to @v.
>> + */
>> +static inline long long atomic64_add_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     __alternative_atomic64(add_wrap, add_return_wrap,
>> +                            ASM_OUTPUT2("+A" (i), "+c" (v)),
>> +                            ASM_NO_INPUT_CLOBBER("memory"));
>> +     return i;
>> +}
>> +
>> +/**
>> + * atomic64_add_and_test - add value from variable and test result
>> + * @i: integer value to add
>> + * @v: pointer to type atomic64_t
>> + *
>> + * Atomically subtracts @i from @v and returns
>> + * true if the result is zero, or false for all
>> + * other cases.
>> + */
>> +static inline int atomic64_add_and_test(long long i, atomic64_t *v)
>> +{
>> +     return atomic64_add_return(i, v) == 0;
>> +}
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline int atomic64_add_and_test_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     return atomic64_add_return_wrap(i, v) == 0;
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>> +/**
>>   * atomic64_sub - subtract the atomic64 variable
>>   * @i: integer value to subtract
>>   * @v: pointer to type atomic64_t
>> @@ -209,6 +367,13 @@ static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
>>       return atomic64_sub_return(i, v) == 0;
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline int atomic64_sub_and_test_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     return atomic64_sub_return_wrap(i, v) == 0;
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic64_inc - increment atomic64 variable
>>   * @v: pointer to type atomic64_t
>> @@ -222,6 +387,18 @@ static inline void atomic64_inc(atomic64_t *v)
>>  }
>>
>>  /**
>> + * atomic64_inc_wrap - increment atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically increments @v by 1.
>> + */
>> +static inline void atomic64_inc_wrap(atomic64_wrap_t *v)
>> +{
>> +     __alternative_atomic64(inc_wrap, inc_return_wrap, /* no output */,
>> +                            "S" (v) : "memory", "eax", "ecx", "edx");
>> +}
>> +
>> +/**
>>   * atomic64_dec - decrement atomic64 variable
>>   * @v: pointer to type atomic64_t
>>   *
>> @@ -246,6 +423,13 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
>>       return atomic64_dec_return(v) == 0;
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline int atomic64_dec_and_test_wrap(atomic64_wrap_t *v)
>> +{
>> +     return atomic64_dec_return_wrap(v) == 0;
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic64_inc_and_test - increment and test
>>   * @v: pointer to type atomic64_t
>> @@ -259,6 +443,13 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
>>       return atomic64_inc_return(v) == 0;
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline int atomic64_inc_and_test_wrap(atomic64_wrap_t *v)
>> +{
>> +     return atomic64_inc_return_wrap(v) == 0;
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic64_add_negative - add and test if negative
>>   * @i: integer value to add
>> @@ -273,6 +464,13 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
>>       return atomic64_add_return(i, v) < 0;
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline int atomic64_add_negative_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     return atomic64_add_return_wrap(i, v) < 0;
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic64_add_unless - add unless the number is a given value
>>   * @v: pointer of type atomic64_t
>> @@ -292,7 +490,6 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
>>       return (int)a;
>>  }
>>
>> -
>>  static inline int atomic64_inc_not_zero(atomic64_t *v)
>>  {
>>       int r;
>> diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
>> index 89ed2f6..d8d3a3d 100644
>> --- a/arch/x86/include/asm/atomic64_64.h
>> +++ b/arch/x86/include/asm/atomic64_64.h
>> @@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
>>  }
>>
>>  /**
>> + * atomic64_read_wrap - read atomic64 variable
>> + * @v: pointer of type atomic64_wrap_t
>> + *
>> + * Atomically reads the value of @v.
>> + * Doesn't imply a read memory barrier.
>> + */
>> +static inline long atomic64_read_wrap(const atomic64_wrap_t *v)
>> +{
>> +     return ACCESS_ONCE((v)->counter);
>> +}
>> +
>> +/**
>>   * atomic64_set - set atomic64 variable
>>   * @v: pointer to type atomic64_t
>>   * @i: required value
>> @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
>>  }
>>
>>  /**
>> + * atomic64_set_wrap - set atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + * @i: required value
>> + *
>> + * Atomically sets the value of @v to @i.
>> + */
>> +static inline void atomic64_set_wrap(atomic64_wrap_t *v, long i)
>> +{
>> +     v->counter = i;
>> +}
>> +
>> +/**
>>   * atomic64_add - add integer to atomic64 variable
>>   * @i: integer value to add
>>   * @v: pointer to type atomic64_t
>> @@ -42,12 +66,55 @@ static inline void atomic64_set(atomic64_t *v, long i)
>>   */
>>  static __always_inline void atomic64_add(long i, atomic64_t *v)
>>  {
>> +     asm volatile(LOCK_PREFIX "addq %1,%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "subq %1,%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +                  : "=m" (v->counter)
>> +                  : "er" (i), "m" (v->counter));
>> +}
>> +
>> +/**
>> + * atomic64_add_wrap - add integer to atomic64 variable
>> + * @i: integer value to add
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically adds @i to @v.
>> + */
>> +static __always_inline void atomic64_add_wrap(long i, atomic64_wrap_t *v)
>> +{
>>       asm volatile(LOCK_PREFIX "addq %1,%0"
>>                    : "=m" (v->counter)
>>                    : "er" (i), "m" (v->counter));
>>  }
>>
>>  /**
>> + * atomic64_add_and_test - add value from variable and test result
>> + * @i: integer value to add
>> + * @v: pointer to type atomic64_t
>> + *
>> + * Atomically adds @i from @v and returns
>> + * true if the result is zero, or false for all
>> + * other cases.
>> + */
>> +static inline bool atomic64_add_and_test(long i, atomic64_t *v)
>> +{
>> +     GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
>> +}
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline bool atomic64_add_and_test_wrap(long i, atomic64_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addq", v->counter, "er", i, "%0", e);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>> +/**
>>   * atomic64_sub - subtract the atomic64 variable
>>   * @i: integer value to subtract
>>   * @v: pointer to type atomic64_t
>> @@ -56,6 +123,26 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
>>   */
>>  static inline void atomic64_sub(long i, atomic64_t *v)
>>  {
>> +     asm volatile(LOCK_PREFIX "subq %1,%0\n"
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "addq %1,%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "=m" (v->counter)
>> +                  : "er" (i), "m" (v->counter));
>> +}
>> +
>> +/**
>> ++ * atomic64_sub_wrap - subtract the atomic64 variable
>> ++ * @i: integer value to subtract
>> ++ * @v: pointer to type atomic64_wrap_t
>> ++ *
>> ++ * Atomically subtracts @i from @v.
>> ++ */
>> +static inline void atomic64_sub_wrap(long i, atomic64_wrap_t *v)
>> +{
>>       asm volatile(LOCK_PREFIX "subq %1,%0"
>>                    : "=m" (v->counter)
>>                    : "er" (i), "m" (v->counter));
>> @@ -72,7 +159,21 @@ static inline void atomic64_sub(long i, atomic64_t *v)
>>   */
>>  static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
>> +     GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", e);
>> +}
>> +
>> +/**
>> + * atomic64_sub_and_test_wrap - subtract value from variable and test result
>> + * @i: integer value to subtract
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically subtracts @i from @v and returns
>> + * true if the result is zero, or false for all
>> + * other cases.
>> + */
>> +static inline bool atomic64_sub_and_test_wrap(long i, atomic64_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
>>  }
>>
>>  /**
>> @@ -83,6 +184,26 @@ static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
>>   */
>>  static __always_inline void atomic64_inc(atomic64_t *v)
>>  {
>> +     asm volatile(LOCK_PREFIX "incq %0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "decq %0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "=m" (v->counter)
>> +                  : "m" (v->counter));
>> +}
>> +
>> +/**
>> + * atomic64_inc_wrap - increment atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically increments @v by 1.
>> + */
>> +static __always_inline void atomic64_inc_wrap(atomic64_wrap_t *v)
>> +{
>>       asm volatile(LOCK_PREFIX "incq %0"
>>                    : "=m" (v->counter)
>>                    : "m" (v->counter));
>> @@ -96,6 +217,26 @@ static __always_inline void atomic64_inc(atomic64_t *v)
>>   */
>>  static __always_inline void atomic64_dec(atomic64_t *v)
>>  {
>> +     asm volatile(LOCK_PREFIX "decq %0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX "incq %0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "=m" (v->counter)
>> +                  : "m" (v->counter));
>> +}
>> +
>> +/**
>> + * atomic64_dec_wrap - decrement atomic64 variable
>> + * @v: pointer to type atomic64_wrap_t
>> + *
>> + * Atomically decrements @v by 1.
>> + */
>> +static __always_inline void atomic64_dec_wrap(atomic64_wrap_t *v)
>> +{
>>       asm volatile(LOCK_PREFIX "decq %0"
>>                    : "=m" (v->counter)
>>                    : "m" (v->counter));
>> @@ -111,8 +252,15 @@ static __always_inline void atomic64_dec(atomic64_t *v)
>>   */
>>  static inline bool atomic64_dec_and_test(atomic64_t *v)
>>  {
>> -     GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
>> +     GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", e);
>> +}
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline bool atomic64_dec_and_test_wrap(atomic64_wrap_t *v)
>> +{
>> +     GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "decq", v->counter, "%0", e);
>>  }
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>>
>>  /**
>>   * atomic64_inc_and_test - increment and test
>> @@ -124,8 +272,15 @@ static inline bool atomic64_dec_and_test(atomic64_t *v)
>>   */
>>  static inline bool atomic64_inc_and_test(atomic64_t *v)
>>  {
>> -     GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
>> +     GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", e);
>> +}
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline bool atomic64_inc_and_test_wrap(atomic64_wrap_t *v)
>> +{
>> +     GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "incq", v->counter, "%0", e);
>>  }
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>>
>>  /**
>>   * atomic64_add_negative - add and test if negative
>> @@ -138,8 +293,15 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
>>   */
>>  static inline bool atomic64_add_negative(long i, atomic64_t *v)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
>> +     GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", s);
>> +}
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline bool atomic64_add_negative_wrap(long i, atomic64_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
>>  }
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>>
>>  /**
>>   * atomic64_add_return - add and return
>> @@ -150,6 +312,11 @@ static inline bool atomic64_add_negative(long i, atomic64_t *v)
>>   */
>>  static __always_inline long atomic64_add_return(long i, atomic64_t *v)
>>  {
>> +     return i + xadd_check_overflow(&v->counter, i);
>> +}
>> +
>> +static __always_inline long atomic64_add_return_wrap(long i, atomic64_wrap_t *v)
>> +{
>>       return i + xadd(&v->counter, i);
>>  }
>>
>> @@ -158,6 +325,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
>>       return atomic64_add_return(-i, v);
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline long atomic64_sub_return_wrap(long i, atomic64_wrap_t *v)
>> +{
>> +     return atomic64_add_return_wrap(-i, v);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  static inline long atomic64_fetch_add(long i, atomic64_t *v)
>>  {
>>       return xadd(&v->counter, i);
>> @@ -171,16 +345,29 @@ static inline long atomic64_fetch_sub(long i, atomic64_t *v)
>>  #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
>>  #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
>>
>> +#define atomic64_inc_return_wrap(v)  (atomic64_add_return_wrap(1, (v)))
>> +#define atomic64_dec_return_wrap(v)  (atomic64_sub_return_wrap(1, (v)))
>> +
>>  static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
>>  {
>>       return cmpxchg(&v->counter, old, new);
>>  }
>>
>> +static inline long atomic64_cmpxchg_wrap(atomic64_wrap_t *v, long old, long new)
>> +{
>> +     return cmpxchg(&v->counter, old, new);
>> +}
>> +
>>  static inline long atomic64_xchg(atomic64_t *v, long new)
>>  {
>>       return xchg(&v->counter, new);
>>  }
>>
>> +static inline long atomic64_xchg_wrap(atomic64_wrap_t *v, long new)
>> +{
>> +     return xchg(&v->counter, new);
>> +}
>> +
>>  /**
>>   * atomic64_add_unless - add unless the number is a given value
>>   * @v: pointer of type atomic64_t
>> @@ -192,11 +379,21 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
>>   */
>>  static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
>>  {
>> -     long c, old;
>> +     long c, old, new;
>>       c = atomic64_read(v);
>>       for (;;) {
>>               if (unlikely(c == (u)))
>>                       break;
>> +             asm volatile("add %2,%0\n"
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                          "jno 0f\n"
>> +                          "sub %2,%0\n"
>> +                          "int $4\n0:\n"
>> +                          _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                          : "=r" (new)
>> +                          : "0" (c), "ir" (a));
>> +
>>               old = atomic64_cmpxchg((v), c, c + (a));
>>               if (likely(old == c))
>>                       break;
>> @@ -205,6 +402,27 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
>>       return c != (u);
>>  }
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static inline bool atomic64_add_unless_wrap(atomic64_wrap_t *v, long a, long u)
>> +{
>> +     long c, old, new;
>> +     c = atomic64_read_wrap(v);
>> +     for (;;) {
>> +             if (unlikely(c == (u)))
>> +                     break;
>> +             asm volatile("add %2,%0\n"
>> +                          : "=r" (new)
>> +                          : "0" (c), "ir" (a));
>> +
>> +             old = atomic64_cmpxchg_wrap((v), c, c + (a));
>> +             if (likely(old == c))
>> +                     break;
>> +             c = old;
>> +     }
>> +     return c != (u);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
>>
>>  /*
>> diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
>> index 68557f52..e25eb0d 100644
>> --- a/arch/x86/include/asm/bitops.h
>> +++ b/arch/x86/include/asm/bitops.h
>> @@ -50,7 +50,7 @@
>>   * a mask operation on a byte.
>>   */
>>  #define IS_IMMEDIATE(nr)             (__builtin_constant_p(nr))
>> -#define CONST_MASK_ADDR(nr, addr)    BITOP_ADDR((void *)(addr) + ((nr)>>3))
>> +#define CONST_MASK_ADDR(nr, addr)    BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
>>  #define CONST_MASK(nr)                       (1 << ((nr) & 7))
>>
>>  /**
>> @@ -203,7 +203,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
>>   */
>>  static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
>>  }
>>
>>  /**
>> @@ -249,7 +249,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
>>   */
>>  static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
>>  }
>>
>>  /**
>> @@ -302,7 +302,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
>>   */
>>  static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
>>  {
>> -     GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
>>  }
>>
>>  static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
>> diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
>> index 9733361..b83f612 100644
>> --- a/arch/x86/include/asm/cmpxchg.h
>> +++ b/arch/x86/include/asm/cmpxchg.h
>> @@ -13,10 +13,14 @@ extern void __xchg_wrong_size(void)
>>       __compiletime_error("Bad argument size for xchg");
>>  extern void __cmpxchg_wrong_size(void)
>>       __compiletime_error("Bad argument size for cmpxchg");
>> +extern void __xadd_check_overflow_wrong_size(void)
>> +     __compiletime_error("Bad argument size for xadd_check_overflow");
>>  extern void __xadd_wrong_size(void)
>>       __compiletime_error("Bad argument size for xadd");
>>  extern void __add_wrong_size(void)
>>       __compiletime_error("Bad argument size for add");
>> +extern void __add_check_overflow_wrong_size(void)
>> +     __compiletime_error("Bad argument size for add_check_overflow");
>>
>>  /*
>>   * Constants for operation sizes. On 32-bit, the 64-bit size it set to
>> @@ -68,6 +72,38 @@ extern void __add_wrong_size(void)
>>               __ret;                                                  \
>>       })
>>
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +#define __xchg_op_check_overflow(ptr, arg, op, lock)                 \
>> +     ({                                                              \
>> +             __typeof__ (*(ptr)) __ret = (arg);                      \
>> +             switch (sizeof(*(ptr))) {                               \
>> +             case __X86_CASE_L:                                      \
>> +                     asm volatile (lock #op "l %0, %1\n"             \
>> +                                   "jno 0f\n"                        \
>> +                                   "mov %0,%1\n"                     \
>> +                                   "int $4\n0:\n"                    \
>> +                                   _ASM_EXTABLE(0b, 0b)              \
>> +                                   : "+r" (__ret), "+m" (*(ptr))     \
>> +                                   : : "memory", "cc");              \
>> +                     break;                                          \
>> +             case __X86_CASE_Q:                                      \
>> +                     asm volatile (lock #op "q %q0, %1\n"            \
>> +                                   "jno 0f\n"                        \
>> +                                   "mov %0,%1\n"                     \
>> +                                   "int $4\n0:\n"                    \
>> +                                   _ASM_EXTABLE(0b, 0b)              \
>> +                                   : "+r" (__ret), "+m" (*(ptr))     \
>> +                                   : : "memory", "cc");              \
>> +                     break;                                          \
>> +             default:                                                \
>> +                     __ ## op ## _check_overflow_wrong_size();       \
>> +             }                                                       \
>> +             __ret;                                                  \
>> +     })
>> +#else
>> +#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
>> +#endif
>> +
>>  /*
>>   * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
>>   * Since this is generally used to protect other memory information, we
>> @@ -166,6 +202,9 @@ extern void __add_wrong_size(void)
>>  #define xadd_sync(ptr, inc)  __xadd((ptr), (inc), "lock; ")
>>  #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
>>
>> +#define __xadd_check_overflow(ptr, inc, lock)        __xchg_op_check_overflow((ptr), (inc), xadd, lock)
>> +#define xadd_check_overflow(ptr, inc)                __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
>> +
>>  #define __add(ptr, inc, lock)                                                \
>>       ({                                                              \
>>               __typeof__ (*(ptr)) __ret = (inc);                      \
>> diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
>> index 7511978..46cfaf0 100644
>> --- a/arch/x86/include/asm/local.h
>> +++ b/arch/x86/include/asm/local.h
>> @@ -10,25 +10,69 @@ typedef struct {
>>       atomic_long_t a;
>>  } local_t;
>>
>> +typedef struct {
>> +     atomic_long_wrap_t a;
>> +} local_wrap_t;
>> +
>>  #define LOCAL_INIT(i)        { ATOMIC_LONG_INIT(i) }
>>
>>  #define local_read(l)        atomic_long_read(&(l)->a)
>> +#define local_read_wrap(l)   atomic_long_read_wrap(&(l)->a)
>>  #define local_set(l, i)      atomic_long_set(&(l)->a, (i))
>> +#define local_set_wrap(l, i) atomic_long_set_wrap(&(l)->a, (i))
>>
>>  static inline void local_inc(local_t *l)
>>  {
>> +     asm volatile(_ASM_INC "%0\n"
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  _ASM_DEC "%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "+m" (l->a.counter));
>> +}
>> +
>> +static inline void local_inc_wrap(local_wrap_t *l)
>> +{
>>       asm volatile(_ASM_INC "%0"
>>                    : "+m" (l->a.counter));
>>  }
>>
>>  static inline void local_dec(local_t *l)
>>  {
>> +     asm volatile(_ASM_DEC "%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  _ASM_INC "%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "+m" (l->a.counter));
>> +}
>> +
>> +static inline void local_dec_wrap(local_wrap_t *l)
>> +{
>>       asm volatile(_ASM_DEC "%0"
>>                    : "+m" (l->a.counter));
>>  }
>>
>>  static inline void local_add(long i, local_t *l)
>>  {
>> +     asm volatile(_ASM_ADD "%1,%0\n"
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  _ASM_SUB "%1,%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "+m" (l->a.counter)
>> +                  : "ir" (i));
>> +}
>> +
>> +static inline void local_add_wrap(long i, local_wrap_t *l)
>> +{
>>       asm volatile(_ASM_ADD "%1,%0"
>>                    : "+m" (l->a.counter)
>>                    : "ir" (i));
>> @@ -36,6 +80,19 @@ static inline void local_add(long i, local_t *l)
>>
>>  static inline void local_sub(long i, local_t *l)
>>  {
>> +     asm volatile(_ASM_SUB "%1,%0\n"
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  _ASM_ADD "%1,%0\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "+m" (l->a.counter)
>> +                  : "ir" (i));
>> +}
>> +
>> +static inline void local_sub_wrap(long i, local_wrap_t *l)
>> +{
>>       asm volatile(_ASM_SUB "%1,%0"
>>                    : "+m" (l->a.counter)
>>                    : "ir" (i));
>> @@ -52,7 +109,7 @@ static inline void local_sub(long i, local_t *l)
>>   */
>>  static inline bool local_sub_and_test(long i, local_t *l)
>>  {
>> -     GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
>> +     GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", e);
>>  }
>>
>>  /**
>> @@ -65,7 +122,7 @@ static inline bool local_sub_and_test(long i, local_t *l)
>>   */
>>  static inline bool local_dec_and_test(local_t *l)
>>  {
>> -     GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
>> +     GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", e);
>>  }
>>
>>  /**
>> @@ -78,7 +135,7 @@ static inline bool local_dec_and_test(local_t *l)
>>   */
>>  static inline bool local_inc_and_test(local_t *l)
>>  {
>> -     GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
>> +     GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", e);
>>  }
>>
>>  /**
>> @@ -92,7 +149,7 @@ static inline bool local_inc_and_test(local_t *l)
>>   */
>>  static inline bool local_add_negative(long i, local_t *l)
>>  {
>> -     GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
>> +     GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", s);
>>  }
>>
>>  /**
>> @@ -105,6 +162,28 @@ static inline bool local_add_negative(long i, local_t *l)
>>  static inline long local_add_return(long i, local_t *l)
>>  {
>>       long __i = i;
>> +     asm volatile(_ASM_XADD "%0, %1\n"
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  _ASM_MOV "%0,%1\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +                  : "+r" (i), "+m" (l->a.counter)
>> +                  : : "memory");
>> +     return i + __i;
>> +}
>> +
>> +/**
>> + * local_add_return_wrap - add and return
>> + * @i: integer value to add
>> + * @l: pointer to type local_wrap_t
>> + *
>> + * Atomically adds @i to @l and returns @i + @l
>> + */
>> +static inline long local_add_return_wrap(long i, local_wrap_t *l)
>> +{
>> +     long __i = i;
>>       asm volatile(_ASM_XADD "%0, %1;"
>>                    : "+r" (i), "+m" (l->a.counter)
>>                    : : "memory");
>> @@ -121,6 +200,8 @@ static inline long local_sub_return(long i, local_t *l)
>>
>>  #define local_cmpxchg(l, o, n) \
>>       (cmpxchg_local(&((l)->a.counter), (o), (n)))
>> +#define local_cmpxchg_wrap(l, o, n) \
>> +     (cmpxchg_local(&((l)->a.counter), (o), (n)))
>>  /* Always has a lock prefix */
>>  #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
>>
>> diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
>> index 17f2186..2fa0e84 100644
>> --- a/arch/x86/include/asm/preempt.h
>> +++ b/arch/x86/include/asm/preempt.h
>> @@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val)
>>   */
>>  static __always_inline bool __preempt_count_dec_and_test(void)
>>  {
>> -     GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
>> +    GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), e);
>>  }
>>
>>  /*
>> diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
>> index 661dd30..0375d3f 100644
>> --- a/arch/x86/include/asm/rmwcc.h
>> +++ b/arch/x86/include/asm/rmwcc.h
>> @@ -5,28 +5,80 @@
>>
>>  /* Use asm goto */
>>
>> -#define __GEN_RMWcc(fullop, var, cc, ...)                            \
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                        \
>>  do {                                                                 \
>> -     asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"             \
>> +     asm_volatile_goto (fullop                                       \
>> +                     ";jno 0f\n"                                     \
>> +                     fullantiop                                      \
>> +                     ";int $4\n0:\n"                                 \
>> +                     _ASM_EXTABLE(0b, 0b)                            \
>> +                      ";j" #cc " %l[cc_label]"                       \
>>                       : : "m" (var), ## __VA_ARGS__                   \
>>                       : "memory" : cc_label);                         \
>>       return 0;                                                       \
>>  cc_label:                                                            \
>>       return 1;                                                       \
>>  } while (0)
>> +#else
>> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                        \
>> +do {                                                                 \
>> +     asm_volatile_goto (fullop ";j" #cc " %l[cc_label]"              \
>> +                     : : "m" (var), ## __VA_ARGS__                   \
>> +                     : "memory" : cc_label);                         \
>> +     return 0;                                                       \
>> +cc_label:                                                            \
>> +     return 1;                                                       \
>> +} while (0)
>> +#endif
>>
>> -#define GEN_UNARY_RMWcc(op, var, arg0, cc)                           \
>> -     __GEN_RMWcc(op " " arg0, var, cc)
>> -
>> -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                       \
>> -     __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
>> +#define __GEN_RMWcc_wrap(fullop, var, cc, ...)do {                                                                   \
>> +     asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"             \
>> +                     : : "m" (var), ## __VA_ARGS__                   \
>> +                     : "memory" : cc_label);                         \
>> +     return 0;                                                       \
>> +cc_label:                                                            \
>> +     return 1;                                                       \
>> +} while (0)
>>
>> +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)                   \
>> +     __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
>> +#define GEN_UNARY_RMWcc_wrap(op, var, arg0, cc)                      \
>> +     __GEN_RMWcc_wrap(op " " arg0, var, cc)
>> +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)               \
>> +     __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
>> +#define GEN_BINARY_RMWcc_wrap(op, var, vcon, val, arg0, cc)  \
>> +     __GEN_RMWcc_wrap(op " %1, " arg0, var, cc, vcon (val))
>>  #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
>>
>>  /* Use flags output or a set instruction */
>>
>> -#define __GEN_RMWcc(fullop, var, cc, ...)                            \
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                        \
>>  do {                                                                 \
>> +     char c;                                                         \
>> +     asm volatile (fullop                                            \
>> +                     ";jno 0f\n"                                     \
>> +                     fullantiop                                      \
>> +                     ";int $4\n0:\n"                                 \
>> +                     _ASM_EXTABLE(0b, 0b)                            \
>> +                     ";" CC_SET(cc)                          \
>> +                     : "+m" (var), CC_OUT(cc) (c)                    \
>> +                     : __VA_ARGS__ : "memory");                      \
>> +     return c != 0;                                                  \
>> +} while (0)
>> +#else
>> +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                        \
>> +do {                                                                 \
>> +     char c;                                                         \
>> +     asm volatile (fullop ";" CC_SET(cc)                             \
>> +                     : "+m" (var), CC_OUT(cc) (c)                    \
>> +                     : __VA_ARGS__ : "memory");                      \
>> +     return c != 0;                                                  \
>> +} while (0)
>> +#endif
>> +
>> +#define __GEN_RMWcc_wrap(fullop, var, cc, ...)do {                                                                   \
>>       bool c;                                                         \
>>       asm volatile (fullop ";" CC_SET(cc)                             \
>>                       : "+m" (var), CC_OUT(cc) (c)                    \
>> @@ -34,12 +86,14 @@ do {                                                                      \
>>       return c;                                                       \
>>  } while (0)
>>
>> -#define GEN_UNARY_RMWcc(op, var, arg0, cc)                           \
>> -     __GEN_RMWcc(op " " arg0, var, cc)
>> -
>> -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                       \
>> -     __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
>> -
>> +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)                   \
>> +     __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
>> +#define GEN_UNARY_RMWcc_wrap(op, var, arg0, cc)                      \
>> +     __GEN_RMWcc_wrap(op " " arg0, var, cc)
>> +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)               \
>> +     __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
>> +#define GEN_BINARY_RMWcc_wrap(op, var, vcon, val, arg0, cc)  \
>> +     __GEN_RMWcc_wrap(op " %2, " arg0, var, cc, vcon (val))
>>  #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
>>
>>  #endif /* _ASM_X86_RMWcc */
>> diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
>> index 3d33a71..4d3f8a5 100644
>> --- a/arch/x86/include/asm/rwsem.h
>> +++ b/arch/x86/include/asm/rwsem.h
>> @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
>>  {
>>       asm volatile("# beginning down_read\n\t"
>>                    LOCK_PREFIX _ASM_INC "(%1)\n\t"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX _ASM_DEC "(%1)\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>>                    /* adds 0x00000001 */
>>                    "  jns        1f\n"
>>                    "  call call_rwsem_down_read_failed\n"
>> @@ -85,6 +93,14 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
>>                    "1:\n\t"
>>                    "  mov          %1,%2\n\t"
>>                    "  add          %3,%2\n\t"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  "sub %3,%2\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>>                    "  jle          2f\n\t"
>>                    LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
>>                    "  jnz          1b\n\t"
>> @@ -99,12 +115,22 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
>>  /*
>>   * lock for writing
>>   */
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +#define ____down_write_undo \
>> +                  "jno 0f\n"\
>> +                  "mov %1,(%2)\n"\
>> +                  "int $4\n0:\n"\
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#else
>> +#define ____down_write_undo
>> +#endif
>>  #define ____down_write(sem, slow_path)                       \
>>  ({                                                   \
>>       long tmp;                                       \
>>       struct rw_semaphore* ret;                       \
>>       asm volatile("# beginning down_write\n\t"       \
>>                    LOCK_PREFIX "  xadd      %1,(%3)\n\t"      \
>> +                  ____down_write_undo                \
>>                    /* adds 0xffff0001, returns the old value */ \
>>                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
>>                    /* was the active mask 0 before? */\
>> @@ -166,6 +192,14 @@ static inline void __up_read(struct rw_semaphore *sem)
>>       long tmp;
>>       asm volatile("# beginning __up_read\n\t"
>>                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  "mov %1,(%2)\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>>                    /* subtracts 1, returns the old value */
>>                    "  jns        1f\n\t"
>>                    "  call call_rwsem_wake\n" /* expects old value in %edx */
>> @@ -184,6 +218,14 @@ static inline void __up_write(struct rw_semaphore *sem)
>>       long tmp;
>>       asm volatile("# beginning __up_write\n\t"
>>                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  "mov %1,(%2)\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>>                    /* subtracts 0xffff0001, returns the old value */
>>                    "  jns        1f\n\t"
>>                    "  call call_rwsem_wake\n" /* expects old value in %edx */
>> @@ -201,6 +243,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
>>  {
>>       asm volatile("# beginning __downgrade_write\n\t"
>>                    LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                  "jno 0f\n"
>> +                  LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
>> +                  "int $4\n0:\n"
>> +                  _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>>                    /*
>>                     * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
>>                     *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
>> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
>> index bd4e3d4..d67a914 100644
>> --- a/arch/x86/kernel/traps.c
>> +++ b/arch/x86/kernel/traps.c
>> @@ -191,6 +191,10 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
>>                       tsk->thread.trap_nr = trapnr;
>>                       die(str, regs, error_code);
>>               }
>> +
>> +             if (trapnr == X86_TRAP_OF)
>> +                     hardened_atomic_overflow(regs);
>> +
>>               return 0;
>>       }
>>
>> diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
>> index 9b0ca8f..0e8a888 100644
>> --- a/arch/x86/lib/atomic64_386_32.S
>> +++ b/arch/x86/lib/atomic64_386_32.S
>> @@ -45,6 +45,10 @@ BEGIN(read)
>>       movl  (v), %eax
>>       movl 4(v), %edx
>>  RET_ENDP
>> +BEGIN(read_wrap)
>> +     movl  (v), %eax
>> +     movl 4(v), %edx
>> +RET_ENDP
>>  #undef v
>>
>>  #define v %esi
>> @@ -52,6 +56,10 @@ BEGIN(set)
>>       movl %ebx,  (v)
>>       movl %ecx, 4(v)
>>  RET_ENDP
>> +BEGIN(set_wrap)
>> +     movl %ebx,  (v)
>> +     movl %ecx, 4(v)
>> +RET_ENDP
>>  #undef v
>>
>>  #define v  %esi
>> @@ -67,6 +75,18 @@ RET_ENDP
>>  BEGIN(add)
>>       addl %eax,  (v)
>>       adcl %edx, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     jno 0f
>> +     subl %eax,  (v)
>> +     sbbl %edx, 4(v)
>> +     int $4
>> +0:
>> +     _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +RET_ENDP
>> +BEGIN(add_wrap)
>> +     addl %eax,  (v)
>> +     adcl %edx, 4(v)
>>  RET_ENDP
>>  #undef v
>>
>> @@ -74,6 +94,20 @@ RET_ENDP
>>  BEGIN(add_return)
>>       addl  (v), %eax
>>       adcl 4(v), %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>> +     movl %eax,  (v)
>> +     movl %edx, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +2:
>> +#endif
>> +RET_ENDP
>> +BEGIN(add_return_wrap)
>> +     addl  (v), %eax
>> +     adcl 4(v), %edx
>>       movl %eax,  (v)
>>       movl %edx, 4(v)
>>  RET_ENDP
>> @@ -83,6 +117,18 @@ RET_ENDP
>>  BEGIN(sub)
>>       subl %eax,  (v)
>>       sbbl %edx, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     jno 0f
>> +     addl %eax,  (v)
>> +     adcl %edx, 4(v)
>> +     int $4
>> +0:
>> +     _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +RET_ENDP
>> +BEGIN(sub_wrap)
>> +     subl %eax,  (v)
>> +     sbbl %edx, 4(v)
>>  RET_ENDP
>>  #undef v
>>
>> @@ -93,6 +139,23 @@ BEGIN(sub_return)
>>       sbbl $0, %edx
>>       addl  (v), %eax
>>       adcl 4(v), %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>> +     movl %eax,  (v)
>> +     movl %edx, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +2:
>> +#endif
>> +RET_ENDP
>> +BEGIN(sub_return_wrap)
>> +     negl %edx
>> +     negl %eax
>> +     sbbl $0, %edx
>> +     addl  (v), %eax
>> +     adcl 4(v), %edx
>>       movl %eax,  (v)
>>       movl %edx, 4(v)
>>  RET_ENDP
>> @@ -102,6 +165,19 @@ RET_ENDP
>>  BEGIN(inc)
>>       addl $1,  (v)
>>       adcl $0, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     jno 0f
>> +     subl $1,  (v)
>> +     sbbl $0, 4(v)
>> +     int $4
>> +0:
>> +     _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +
>> +RET_ENDP
>> +BEGIN(inc_wrap)
>> +     addl $1,  (v)
>> +     adcl $0, 4(v)
>>  RET_ENDP
>>  #undef v
>>
>> @@ -111,6 +187,22 @@ BEGIN(inc_return)
>>       movl 4(v), %edx
>>       addl $1, %eax
>>       adcl $0, %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>> +     movl %eax,  (v)
>> +     movl %edx, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +2:
>> +#endif
>> +RET_ENDP
>> +BEGIN(inc_return_wrap)
>> +     movl  (v), %eax
>> +     movl 4(v), %edx
>> +     addl $1, %eax
>> +     adcl $0, %edx
>>       movl %eax,  (v)
>>       movl %edx, 4(v)
>>  RET_ENDP
>> @@ -120,6 +212,18 @@ RET_ENDP
>>  BEGIN(dec)
>>       subl $1,  (v)
>>       sbbl $0, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     jno 0f
>> +     addl $1,  (v)
>> +     adcl $0, 4(v)
>> +     int $4
>> +0:
>> +     _ASM_EXTABLE(0b, 0b)
>> +#endif
>> +RET_ENDP
>> +BEGIN(dec_wrap)
>> +     subl $1,  (v)
>> +     sbbl $0, 4(v)
>>  RET_ENDP
>>  #undef v
>>
>> @@ -129,6 +233,22 @@ BEGIN(dec_return)
>>       movl 4(v), %edx
>>       subl $1, %eax
>>       sbbl $0, %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>> +     movl %eax,  (v)
>> +     movl %edx, 4(v)
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +2:
>> +#endif
>> +RET_ENDP
>> +BEGIN(dec_return_wrap)
>> +     movl  (v), %eax
>> +     movl 4(v), %edx
>> +     subl $1, %eax
>> +     sbbl $0, %edx
>>       movl %eax,  (v)
>>       movl %edx, 4(v)
>>  RET_ENDP
>> @@ -140,6 +260,11 @@ BEGIN(add_unless)
>>       adcl %edx, %edi
>>       addl  (v), %eax
>>       adcl 4(v), %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>>       cmpl %eax, %ecx
>>       je 3f
>>  1:
>> @@ -165,6 +290,11 @@ BEGIN(inc_not_zero)
>>  1:
>>       addl $1, %eax
>>       adcl $0, %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>>       movl %eax,  (v)
>>       movl %edx, 4(v)
>>       movl $1, %eax
>> @@ -183,6 +313,11 @@ BEGIN(dec_if_positive)
>>       movl 4(v), %edx
>>       subl $1, %eax
>>       sbbl $0, %edx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 1f)
>> +#endif
>>       js 1f
>>       movl %eax,  (v)
>>       movl %edx, 4(v)
>> diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
>> index db3ae854..5bd864e 100644
>> --- a/arch/x86/lib/atomic64_cx8_32.S
>> +++ b/arch/x86/lib/atomic64_cx8_32.S
>> @@ -22,9 +22,19 @@
>>
>>  ENTRY(atomic64_read_cx8)
>>       read64 %ecx
>> +     /* Pax has pax_force_retaddr here
>> +      * do we want similar? If yes, changes
>> +      * have to be made in more places below */
>>       ret
>>  ENDPROC(atomic64_read_cx8)
>>
>> +ENTRY(atomic64_read_wrap_cx8)
>> +     read64 %ecx
>> +/* do we want smth like the below line?
>> + *   pax_force_retaddr */
>> +     ret
>> +ENDPROC(atomic64_read_wrap_cx8)
>> +
>>  ENTRY(atomic64_set_cx8)
>>  1:
>>  /* we don't need LOCK_PREFIX since aligned 64-bit writes
>> @@ -35,6 +45,17 @@ ENTRY(atomic64_set_cx8)
>>       ret
>>  ENDPROC(atomic64_set_cx8)
>>
>> +ENTRY(atomic64_set_wrap_cx8)
>> +1:
>> +/* we don't need LOCK_PREFIX since aligned 64-bit writes
>> + * are atomic on 586 and newer */
>> +     cmpxchg8b (%esi)
>> +     jne 1b
>> +
>> +     /* pax_force_retaddr */
>> +     ret
>> +ENDPROC(atomic64_set_wrap_cx8)
>> +
>>  ENTRY(atomic64_xchg_cx8)
>>  1:
>>       LOCK_PREFIX
>> @@ -44,8 +65,8 @@ ENTRY(atomic64_xchg_cx8)
>>       ret
>>  ENDPROC(atomic64_xchg_cx8)
>>
>> -.macro addsub_return func ins insc
>> -ENTRY(atomic64_\func\()_return_cx8)
>> +.macro addsub_return func ins insc wrap=""
>> +ENTRY(atomic64_\func\()_return\wrap\()_cx8)
>>       pushl %ebp
>>       pushl %ebx
>>       pushl %esi
>> @@ -61,6 +82,13 @@ ENTRY(atomic64_\func\()_return_cx8)
>>       movl %edx, %ecx
>>       \ins\()l %esi, %ebx
>>       \insc\()l %edi, %ecx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +.ifb \wrap
>> +     into
>> +2:
>> +     _ASM_EXTABLE(2b, 3f)
>> +.endif
>> +#endif
>>       LOCK_PREFIX
>>       cmpxchg8b (%ebp)
>>       jne 1b
>> @@ -68,19 +96,27 @@ ENTRY(atomic64_\func\()_return_cx8)
>>  10:
>>       movl %ebx, %eax
>>       movl %ecx, %edx
>> +
>> +.ifb \wrap
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +3:
>> +#endif
>> +.endif
>>       popl %edi
>>       popl %esi
>>       popl %ebx
>>       popl %ebp
>>       ret
>> -ENDPROC(atomic64_\func\()_return_cx8)
>> +ENDPROC(atomic64_\func\()_return\wrap\()_cx8)
>>  .endm
>>
>>  addsub_return add add adc
>>  addsub_return sub sub sbb
>> +addsub_return add add adc _wrap
>> +addsub_return sub sub sbb _wrap
>>
>> -.macro incdec_return func ins insc
>> -ENTRY(atomic64_\func\()_return_cx8)
>> +.macro incdec_return func ins insc wrap=""
>> +ENTRY(atomic64_\func\()_return\wrap\()_cx8)
>>       pushl %ebx
>>
>>       read64 %esi
>> @@ -89,6 +125,13 @@ ENTRY(atomic64_\func\()_return_cx8)
>>       movl %edx, %ecx
>>       \ins\()l $1, %ebx
>>       \insc\()l $0, %ecx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +.ifb \wrap
>> +     into
>> +2:
>> +     _ASM_EXTABLE(2b, 3f)
>> +.endif
>> +#endif
>>       LOCK_PREFIX
>>       cmpxchg8b (%esi)
>>       jne 1b
>> @@ -96,13 +139,21 @@ ENTRY(atomic64_\func\()_return_cx8)
>>  10:
>>       movl %ebx, %eax
>>       movl %ecx, %edx
>> +
>> +.ifb \wrap
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +3:
>> +#endif
>> +.endif
>>       popl %ebx
>>       ret
>> -ENDPROC(atomic64_\func\()_return_cx8)
>> +ENDPROC(atomic64_\func\()_return\wrap\()_cx8)
>>  .endm
>>
>>  incdec_return inc add adc
>>  incdec_return dec sub sbb
>> +incdec_return inc add adc _wrap
>> +incdec_return dec sub sbb _wrap
>>
>>  ENTRY(atomic64_dec_if_positive_cx8)
>>       pushl %ebx
>> @@ -113,6 +164,11 @@ ENTRY(atomic64_dec_if_positive_cx8)
>>       movl %edx, %ecx
>>       subl $1, %ebx
>>       sbb $0, %ecx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 2f)
>> +#endif
>>       js 2f
>>       LOCK_PREFIX
>>       cmpxchg8b (%esi)
>> @@ -144,6 +200,11 @@ ENTRY(atomic64_add_unless_cx8)
>>       movl %edx, %ecx
>>       addl %ebp, %ebx
>>       adcl %edi, %ecx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 3f)
>> +#endif
>>       LOCK_PREFIX
>>       cmpxchg8b (%esi)
>>       jne 1b
>> @@ -173,6 +234,11 @@ ENTRY(atomic64_inc_not_zero_cx8)
>>       xorl %ecx, %ecx
>>       addl $1, %ebx
>>       adcl %edx, %ecx
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +     into
>> +1234:
>> +     _ASM_EXTABLE(1234b, 3f)
>> +#endif
>>       LOCK_PREFIX
>>       cmpxchg8b (%esi)
>>       jne 1b
>> --
>> 2.7.4
>>
Reshetova, Elena Oct. 26, 2016, 11:15 a.m. UTC | #3
<snip>

Btw, I hope no one minds if I cut irrelevant parts out of the conversation with snips like the one above.
Otherwise I find it takes more time to scroll down to the right places, and the risk of missing something is higher.

>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline bool atomic_add_negative_wrap(int i, atomic_wrap_t *v)
>> +{
>> +     GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>> +
>>  /**
>>   * atomic_add_return - add integer and return
>>   * @i: integer value to add
>> @@ -153,6 +322,18 @@ static __always_inline bool atomic_add_negative(int i, atomic_t *v)
>>   */
>>  static __always_inline int atomic_add_return(int i, atomic_t *v)
>>  {
>> +     return i + xadd_check_overflow(&v->counter, i);
>> +}
>
> If overflow, should this function still return i + v->counter?
> (The caller would die anyway, though.)
>

>Yes, because in the non-overflow case, xadd_check_overflow() would
>return the previous value of v->counter.  This gets added to i and
>returned, which is correct and guaranteed to not result in an overflow
>(if it did, the checks in xadd_check_overflow() would kill the
>process, as you noted).

>In the overflow case, the caller gets killed anyway: before
>xadd_check_overflow() can return, do_trap() calls
>hardened_atomic_overflow() which calls BUG(), so the return statement
>won't finish executing.

>One thing to note about the pattern of using i +
>xadd_check_overflow(): there's a potential TOCTOU issue if i can be
>modified after xadd_check_overflow() returns, but before the
>expression (i + xadd_check_overflow()) is evaluated.  In areas where i
>is shared between threads, we might want to make (i +
>xadd_check_overflow()) a critical section.

How should we mark the critical section here?
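One minimal way to close that window, assuming the delta really lives in memory shared with other threads (the helper name and the READ_ONCE() snapshot below are purely illustrative, not something this patch adds), is to read the delta exactly once and use that local copy for both the xadd and the final sum:

static __always_inline int atomic_add_return_from_shared(const int *shared_delta,
							  atomic_t *v)
{
	/* single read: the value added and the value returned cannot diverge */
	int delta = READ_ONCE(*shared_delta);

	return delta + xadd_check_overflow(&v->counter, delta);
}

With a by-value parameter like the current atomic_add_return(), the compiler already works on its own copy of i, so the window described above would mainly matter at call sites that re-read a shared location.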

>>  #define atomic_dec_return(v)  (atomic_sub_return(1, v))
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +static __always_inline int atomic_dec_return_wrap(atomic_wrap_t *v)
>> +{
>> +     return atomic_sub_return_wrap(1, v);
>> +}
>> +#endif /* CONFIG_HARDENED_ATOMIC */
>>
>>  static __always_inline int atomic_fetch_add(int i, atomic_t *v)
>>  {
>
> and atomic_fetch_add/sub() should do
>
>         return xadd_check_overflow((+/-)i, v);

Indeed, we don't have wrap versions defined here, so the basic ones were left unprotected.
Fixed now. Thank you for noticing!
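For reference, a sketch of what the protected/unprotected pair might look like, following how xadd()/xadd_check_overflow() are used elsewhere in this patch; the _wrap variant is an assumption about the upcoming respin, not a quote from it:

static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	/* checked variant: overflow is caught by xadd_check_overflow() and the caller is killed */
	return xadd_check_overflow(&v->counter, i);
}

static __always_inline int atomic_fetch_add_wrap(int i, atomic_wrap_t *v)
{
	/* wrapping variant: plain xadd, no overflow check */
	return xadd(&v->counter, i);
}

atomic_fetch_sub() would then become xadd_check_overflow(&v->counter, -i) in the same way.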

>> + * Atomically adds @a to @v, so long as @v was not already @u.
>> + * Returns the old value of @v.
>> + */
>> +static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
>> +                                                 int a, int u)
>> +{
>> +     int c, old, new;
>> +     c = atomic_read_wrap(v);
>> +     for (;;) {
>> +             if (unlikely(c == (u)))
>> +                     break;
>> +
>> +             asm volatile("addl %2,%0\n"
>> +
>> +#ifdef CONFIG_HARDENED_ATOMIC
>> +                          "jno 0f\n"
>> +                          "subl %2,%0\n"
>> +                          "int $4\n0:\n"
>> +                          _ASM_EXTABLE(0b, 0b)
>> +#endif
>
> Is this a mistake? We don't need a check here.
>

>Yes, this appears to be a mistake.

A clear copy-paste mistake. Fixed now. Thanks again!
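For the record, a sketch of how the corrected loop could look once the hardening block is dropped; since this variant is explicitly allowed to wrap, plain C arithmetic should be enough (an assumption about the fix, not a quote from the respin):

	for (;;) {
		if (unlikely(c == (u)))
			break;
		/* wrapping is permitted here, so no overflow check */
		old = atomic_cmpxchg_wrap((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;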

>>   * Other variants with different arithmetic operators:
>>   */
>> @@ -149,6 +245,14 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
>>       return i;
>>  }
>>
>> +static inline long long atomic64_sub_return_wrap(long long i, atomic64_wrap_t *v)
>> +{
>> +     alternative_atomic64(sub_return,
>
> sub_return_wrap?

Yes, thank you again! I thought I caught them all last time, but no, this one still got in :(

Best Regards,
Elena.
Kees Cook Oct. 26, 2016, 8:51 p.m. UTC | #4
On Wed, Oct 26, 2016 at 4:15 AM, Reshetova, Elena
<elena.reshetova@intel.com> wrote:
>>> +static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
>>> +                                                 int a, int u)
>>> +{
>>> +     int c, old, new;
>>> +     c = atomic_read_wrap(v);
>>> +     for (;;) {
>>> +             if (unlikely(c == (u)))
>>> +                     break;
>>> +
>>> +             asm volatile("addl %2,%0\n"
>>> +
>>> +#ifdef CONFIG_HARDENED_ATOMIC
>>> +                          "jno 0f\n"
>>> +                          "subl %2,%0\n"
>>> +                          "int $4\n0:\n"
>>> +                          _ASM_EXTABLE(0b, 0b)
>>> +#endif
>>
>> Is this a mistake? We don't need a check here.
>
>>Yes, this appears to be a mistake.
>
> Clear copy paste mistake. Fixed now. Thanks again!

What was the mistake here?

-Kees
David Windsor Oct. 26, 2016, 9:48 p.m. UTC | #5
On Wed, Oct 26, 2016 at 4:51 PM, Kees Cook <keescook@chromium.org> wrote:
> On Wed, Oct 26, 2016 at 4:15 AM, Reshetova, Elena
> <elena.reshetova@intel.com> wrote:
>>>> +static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
>>>> +                                                 int a, int u)
>>>> +{
>>>> +     int c, old, new;
>>>> +     c = atomic_read_wrap(v);
>>>> +     for (;;) {
>>>> +             if (unlikely(c == (u)))
>>>> +                     break;
>>>> +
>>>> +             asm volatile("addl %2,%0\n"
>>>> +
>>>> +#ifdef CONFIG_HARDENED_ATOMIC
>>>> +                          "jno 0f\n"
>>>> +                          "subl %2,%0\n"
>>>> +                          "int $4\n0:\n"
>>>> +                          _ASM_EXTABLE(0b, 0b)
>>>> +#endif
>>>
>>> Is this a mistake? We don't need a check here.
>>
>>>Yes, this appears to be a mistake.
>>
>> Clear copy paste mistake. Fixed now. Thanks again!
>
> What was the mistake here?
>

It's a function that allows wrapping to occur: overflow protection
shouldn't happen here.

> -Kees
>
> --
> Kees Cook
> Nexus Security
Kees Cook Oct. 26, 2016, 9:52 p.m. UTC | #6
On Wed, Oct 26, 2016 at 2:48 PM, David Windsor <dwindsor@gmail.com> wrote:
> On Wed, Oct 26, 2016 at 4:51 PM, Kees Cook <keescook@chromium.org> wrote:
>> On Wed, Oct 26, 2016 at 4:15 AM, Reshetova, Elena
>> <elena.reshetova@intel.com> wrote:
>>>>> +static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
>>>>> +                                                 int a, int u)
>>>>> +{
>>>>> +     int c, old, new;
>>>>> +     c = atomic_read_wrap(v);
>>>>> +     for (;;) {
>>>>> +             if (unlikely(c == (u)))
>>>>> +                     break;
>>>>> +
>>>>> +             asm volatile("addl %2,%0\n"
>>>>> +
>>>>> +#ifdef CONFIG_HARDENED_ATOMIC
>>>>> +                          "jno 0f\n"
>>>>> +                          "subl %2,%0\n"
>>>>> +                          "int $4\n0:\n"
>>>>> +                          _ASM_EXTABLE(0b, 0b)
>>>>> +#endif
>>>>
>>>> Is this a mistake? We don't need a check here.
>>>
>>>>Yes, this appears to be a mistake.
>>>
>>> Clear copy paste mistake. Fixed now. Thanks again!
>>
>> What was the mistake here?
>>
>
> It's an function that allows wrapping to occur: overflow protection
> shouldn't happen here.

Oh! Yes, of course. My eyes missed the _wrap part. :)

-Kees
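For readers skimming the patch below, the recurring pattern is: perform the LOCK-prefixed operation, skip forward if the overflow flag is clear, otherwise undo the operation (pinning the counter at its saturation value) and raise the overflow exception with int $4, which do_trap_no_signal() now forwards to hardened_atomic_overflow(). An annotated copy of the atomic_inc() hunk, with the comments added here for illustration only:

static __always_inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0\n"	/* atomic increment */
#ifdef CONFIG_HARDENED_ATOMIC
		     "jno 0f\n"			/* no signed overflow: skip the handler */
		     LOCK_PREFIX "decl %0\n"	/* undo, so the counter stays at INT_MAX */
		     "int $4\n0:\n"		/* raise #OF (X86_TRAP_OF) */
		     _ASM_EXTABLE(0b, 0b)	/* extable entry: fault and fixup both at label 0 */
#endif
		     : "+m" (v->counter));
}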

Patch

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 402eee4..6c36184 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -79,6 +79,7 @@  config X86
 	select HAVE_AOUT			if X86_32
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select HAVE_ARCH_HARDENED_ATOMIC
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 14635c5..4a35c9b 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -27,6 +27,17 @@  static __always_inline int atomic_read(const atomic_t *v)
 }
 
 /**
+ * atomic_read_wrap - read atomic variable
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically reads the value of @v.
+ */
+static __always_inline int atomic_read_wrap(const atomic_wrap_t *v)
+{
+	return ACCESS_ONCE((v)->counter);
+}
+
+/**
  * atomic_set - set atomic variable
  * @v: pointer of type atomic_t
  * @i: required value
@@ -39,6 +50,18 @@  static __always_inline void atomic_set(atomic_t *v, int i)
 }
 
 /**
+ * atomic_set_wrap - set atomic variable
+ * @v: pointer of type atomic_wrap_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static __always_inline void atomic_set_wrap(atomic_wrap_t *v, int i)
+{
+	v->counter = i;
+}
+
+/**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
@@ -47,12 +70,55 @@  static __always_inline void atomic_set(atomic_t *v, int i)
  */
 static __always_inline void atomic_add(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "addl %1,%0"
+	asm volatile(LOCK_PREFIX "addl %1,%0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "subl %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     : "+m" (v->counter)
 		     : "ir" (i));
 }
 
 /**
+ * atomic_add_wrap - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __always_inline void atomic_add_wrap(int i, atomic_wrap_t *v)
+{
+	asm volatile(LOCK_PREFIX "addl %1,%0\n"
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+/**
+ * atomic_add_and_test - add value from variable and test result
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool atomic_add_and_test(int i, atomic_t *v)
+{
+	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+}
+
+#ifdef CONFIG_HARDENED_ATOMIC
+static __always_inline bool atomic_add_and_test_wrap(int i, atomic_wrap_t *v)
+{
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", e);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
+/**
  * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
@@ -61,7 +127,29 @@  static __always_inline void atomic_add(int i, atomic_t *v)
  */
 static __always_inline void atomic_sub(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "subl %1,%0"
+	asm volatile(LOCK_PREFIX "subl %1,%0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "addl %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+/**
+ * atomic_sub_wrap - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static __always_inline void atomic_sub_wrap(int i, atomic_wrap_t *v)
+{
+	asm volatile(LOCK_PREFIX "subl %1,%0\n"
 		     : "+m" (v->counter)
 		     : "ir" (i));
 }
@@ -77,7 +165,21 @@  static __always_inline void atomic_sub(int i, atomic_t *v)
  */
 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", e);
+}
+
+/**
+ * atomic_sub_and_test_wrap - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool atomic_sub_and_test_wrap(int i, atomic_wrap_t *v)
+{
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
 
 /**
@@ -88,7 +190,27 @@  static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
  */
 static __always_inline void atomic_inc(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "incl %0"
+	asm volatile(LOCK_PREFIX "incl %0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "decl %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter));
+}
+
+/**
+ * atomic_inc_wrap - increment atomic variable
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __always_inline void atomic_inc_wrap(atomic_wrap_t *v)
+{
+	asm volatile(LOCK_PREFIX "incl %0\n"
 		     : "+m" (v->counter));
 }
 
@@ -100,7 +222,27 @@  static __always_inline void atomic_inc(atomic_t *v)
  */
 static __always_inline void atomic_dec(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "decl %0"
+	asm volatile(LOCK_PREFIX "decl %0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "incl %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec_wrap - decrement atomic variable
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static __always_inline void atomic_dec_wrap(atomic_wrap_t *v)
+{
+	asm volatile(LOCK_PREFIX "decl %0\n"
 		     : "+m" (v->counter));
 }
 
@@ -114,9 +256,16 @@  static __always_inline void atomic_dec(atomic_t *v)
  */
 static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", e);
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static __always_inline bool atomic_dec_and_test_wrap(atomic_wrap_t *v)
+{
+	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "decl", v->counter, "%0", e);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
@@ -127,7 +276,20 @@  static __always_inline bool atomic_dec_and_test(atomic_t *v)
  */
 static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", e);
+}
+
+/**
+ * atomic_inc_and_test_wrap - increment and test
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline int atomic_inc_and_test_wrap(atomic_wrap_t *v)
+{
+	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
 
 /**
@@ -141,9 +303,16 @@  static __always_inline bool atomic_inc_and_test(atomic_t *v)
  */
 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", s);
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static __always_inline bool atomic_add_negative_wrap(int i, atomic_wrap_t *v)
+{
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic_add_return - add integer and return
  * @i: integer value to add
@@ -153,6 +322,18 @@  static __always_inline bool atomic_add_negative(int i, atomic_t *v)
  */
 static __always_inline int atomic_add_return(int i, atomic_t *v)
 {
+	return i + xadd_check_overflow(&v->counter, i);
+}
+
+/**
+ * atomic_add_return_wrap - add integer and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_wrap_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __always_inline int atomic_add_return_wrap(int i, atomic_wrap_t *v)
+{
 	return i + xadd(&v->counter, i);
 }
 
@@ -168,8 +349,26 @@  static __always_inline int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i, v);
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static __always_inline int atomic_sub_return_wrap(int i, atomic_wrap_t *v)
+{
+	return atomic_add_return_wrap(-i, v);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
+static __always_inline int atomic_inc_return_wrap(atomic_wrap_t *v)
+{
+	return atomic_add_return_wrap(1, v);
+}
+
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
+#ifdef CONFIG_HARDENED_ATOMIC
+static __always_inline int atomic_dec_return_wrap(atomic_wrap_t *v)
+{
+	return atomic_sub_return_wrap(1, v);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
 
 static __always_inline int atomic_fetch_add(int i, atomic_t *v)
 {
@@ -186,11 +385,21 @@  static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return cmpxchg(&v->counter, old, new);
 }
 
+static __always_inline int atomic_cmpxchg_wrap(atomic_wrap_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
 static inline int atomic_xchg(atomic_t *v, int new)
 {
 	return xchg(&v->counter, new);
 }
 
+static inline int atomic_xchg_wrap(atomic_wrap_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
+
 #define ATOMIC_OP(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
@@ -236,12 +445,25 @@  ATOMIC_OPS(xor, ^)
  */
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
+	int c, old, new;
 	c = atomic_read(v);
 	for (;;) {
 		if (unlikely(c == (u)))
 			break;
-		old = atomic_cmpxchg((v), c, c + (a));
+
+		asm volatile("addl %2,%0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+			     "jno 0f\n"
+			     "subl %2,%0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+
+			     : "=r" (new)
+			     : "0" (c), "ir" (a));
+
+		old = atomic_cmpxchg((v), c, new);
 		if (likely(old == c))
 			break;
 		c = old;
@@ -250,6 +472,87 @@  static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 /**
+ * __atomic_add_unless_wrap - add unless the number is already a given value
+ * @v: pointer of type atomic_wrap_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the old value of @v.
+ */
+static __always_inline int __atomic_add_unless_wrap(atomic_wrap_t *v,
+						    int a, int u)
+{
+	int c, old, new;
+	c = atomic_read_wrap(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+
+		asm volatile("addl %2,%0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+			     "jno 0f\n"
+			     "subl %2,%0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+
+			     : "=r" (new)
+			     : "0" (c), "ir" (a));
+
+		old = atomic_cmpxchg_wrap((v), c, new);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c;
+}
+
+/**
+ * atomic_inc_not_zero_hint - increment if not null
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of probable
+ * value of the atomic. This helps processor to not read the memory
+ * before doing the atomic read/modify/write cycle, lowering
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+	int val, c = hint, new;
+
+	/* sanity test, should be removed by compiler if hint is a constant */
+	if (!hint)
+		return __atomic_add_unless(v, 1, 0);
+
+	do {
+		asm volatile("incl %0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+			     "jno 0f\n"
+			     "decl %0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+
+			     : "=r" (new)
+			     : "0" (c));
+
+		val = atomic_cmpxchg((v), c, new);
+		if (val == c)
+			return 1;
+		c = val;
+	} while (c);
+
+	return 0;
+}
+
+/**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
  *
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 71d7705..7c88320 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -11,6 +11,14 @@  typedef struct {
 	u64 __aligned(8) counter;
 } atomic64_t;
 
+#ifdef CONFIG_HARDENED_ATOMIC
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_wrap_t;
+#else
+typedef atomic64_t atomic64_wrap_t;
+#endif
+
 #define ATOMIC64_INIT(val)	{ (val) }
 
 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
@@ -36,21 +44,31 @@  typedef struct {
 	ATOMIC64_DECL_ONE(sym##_386)
 
 ATOMIC64_DECL_ONE(add_386);
+ATOMIC64_DECL_ONE(add_wrap_386);
 ATOMIC64_DECL_ONE(sub_386);
+ATOMIC64_DECL_ONE(sub_wrap_386);
 ATOMIC64_DECL_ONE(inc_386);
+ATOMIC64_DECL_ONE(inc_wrap_386);
 ATOMIC64_DECL_ONE(dec_386);
+ATOMIC64_DECL_ONE(dec_wrap_386);
 #endif
 
 #define alternative_atomic64(f, out, in...) \
 	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
 
 ATOMIC64_DECL(read);
+ATOMIC64_DECL(read_wrap);
 ATOMIC64_DECL(set);
+ATOMIC64_DECL(set_wrap);
 ATOMIC64_DECL(xchg);
 ATOMIC64_DECL(add_return);
+ATOMIC64_DECL(add_return_wrap);
 ATOMIC64_DECL(sub_return);
+ATOMIC64_DECL(sub_return_wrap);
 ATOMIC64_DECL(inc_return);
+ATOMIC64_DECL(inc_return_wrap);
 ATOMIC64_DECL(dec_return);
+ATOMIC64_DECL(dec_return_wrap);
 ATOMIC64_DECL(dec_if_positive);
 ATOMIC64_DECL(inc_not_zero);
 ATOMIC64_DECL(add_unless);
@@ -76,6 +94,21 @@  static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
 }
 
 /**
+ * atomic64_cmpxchg_wrap - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_wrap_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg_wrap(atomic64_wrap_t *v, long long o, long long n)
+{
+	return cmpxchg64(&v->counter, o, n);
+}
+
+/**
  * atomic64_xchg - xchg atomic64 variable
  * @v: pointer to type atomic64_t
  * @n: value to assign
@@ -95,6 +128,25 @@  static inline long long atomic64_xchg(atomic64_t *v, long long n)
 }
 
 /**
+ * atomic64_xchg_wrap - xchg atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ * @n: value to assign
+ *
+ * Atomically xchgs the value of @v to @n and returns
+ * the old value.
+ */
+static inline long long atomic64_xchg_wrap(atomic64_wrap_t *v, long long n)
+{
+	long long o;
+	unsigned high = (unsigned)(n >> 32);
+	unsigned low = (unsigned)n;
+	alternative_atomic64(xchg, "=&A" (o),
+			     "S" (v), "b" (low), "c" (high)
+			     : "memory");
+	return o;
+}
+
+/**
  * atomic64_set - set atomic64 variable
  * @v: pointer to type atomic64_t
  * @i: value to assign
@@ -111,6 +163,22 @@  static inline void atomic64_set(atomic64_t *v, long long i)
 }
 
 /**
+ * atomic64_set_wrap - set atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ * @n: value to assign
+ *
+ * Atomically sets the value of @v to @n.
+ */
+static inline void atomic64_set_wrap(atomic64_wrap_t *v, long long i)
+{
+	unsigned high = (unsigned)(i >> 32);
+	unsigned low = (unsigned)i;
+	alternative_atomic64(set, /* no output */,
+			     "S" (v), "b" (low), "c" (high)
+			     : "eax", "edx", "memory");
+}
+
+/**
  * atomic64_read - read atomic64 variable
  * @v: pointer to type atomic64_t
  *
@@ -121,7 +189,20 @@  static inline long long atomic64_read(const atomic64_t *v)
 	long long r;
 	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
 	return r;
- }
+}
+
+/**
+ * atomic64_read_wrap - read atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically reads the value of @v and returns it.
+ */
+static inline long long atomic64_read_wrap(const atomic64_wrap_t *v)
+{
+	long long r;
+	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+	return r;
+}
 
 /**
  * atomic64_add_return - add and return
@@ -138,6 +219,21 @@  static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return i;
 }
 
+/**
+ * atomic64_add_return_wrap - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically adds @i to @v and returns @i + *@v
+ */
+static inline long long atomic64_add_return_wrap(long long i, atomic64_wrap_t *v)
+{
+	alternative_atomic64(add_return_wrap,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
+	return i;
+}
+
 /*
  * Other variants with different arithmetic operators:
  */
@@ -149,6 +245,14 @@  static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	return i;
 }
 
+static inline long long atomic64_sub_return_wrap(long long i, atomic64_wrap_t *v)
+{
+	alternative_atomic64(sub_return,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
+	return i;
+}
+
 static inline long long atomic64_inc_return(atomic64_t *v)
 {
 	long long a;
@@ -157,6 +261,14 @@  static inline long long atomic64_inc_return(atomic64_t *v)
 	return a;
 }
 
+static inline long long atomic64_inc_return_wrap(atomic64_wrap_t *v)
+{
+	long long a;
+	alternative_atomic64(inc_return_wrap, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
+	return a;
+}
+
 static inline long long atomic64_dec_return(atomic64_t *v)
 {
 	long long a;
@@ -165,6 +277,16 @@  static inline long long atomic64_dec_return(atomic64_t *v)
 	return a;
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline long long atomic64_dec_return_wrap(atomic64_wrap_t *v)
+{
+	long long a;
+	alternative_atomic64(dec_return_wrap, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
+	return a;
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic64_add - add integer to atomic64 variable
  * @i: integer value to add
@@ -181,6 +303,42 @@  static inline long long atomic64_add(long long i, atomic64_t *v)
 }
 
 /**
+ * atomic64_add_wrap - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline long long atomic64_add_wrap(long long i, atomic64_wrap_t *v)
+{
+	__alternative_atomic64(add_wrap, add_return_wrap,
+			       ASM_OUTPUT2("+A" (i), "+c" (v)),
+			       ASM_NO_INPUT_CLOBBER("memory"));
+	return i;
+}
+
+/**
+ * atomic64_add_and_test - add value from variable and test result
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_add_and_test(long long i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) == 0;
+}
+
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline int atomic64_add_and_test_wrap(long long i, atomic64_wrap_t *v)
+{
+	return atomic64_add_return_wrap(i, v) == 0;
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
+/**
  * atomic64_sub - subtract the atomic64 variable
  * @i: integer value to subtract
  * @v: pointer to type atomic64_t
@@ -209,6 +367,13 @@  static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
 	return atomic64_sub_return(i, v) == 0;
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline int atomic64_sub_and_test_wrap(long long i, atomic64_wrap_t *v)
+{
+	return atomic64_sub_return_wrap(i, v) == 0;
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic64_inc - increment atomic64 variable
  * @v: pointer to type atomic64_t
@@ -222,6 +387,18 @@  static inline void atomic64_inc(atomic64_t *v)
 }
 
 /**
+ * atomic64_inc_wrap - increment atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic64_inc_wrap(atomic64_wrap_t *v)
+{
+	__alternative_atomic64(inc_wrap, inc_return_wrap, /* no output */,
+			       "S" (v) : "memory", "eax", "ecx", "edx");
+}
+
+/**
  * atomic64_dec - decrement atomic64 variable
  * @v: pointer to type atomic64_t
  *
@@ -246,6 +423,13 @@  static inline int atomic64_dec_and_test(atomic64_t *v)
 	return atomic64_dec_return(v) == 0;
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline int atomic64_dec_and_test_wrap(atomic64_wrap_t *v)
+{
+	return atomic64_dec_return_wrap(v) == 0;
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic64_inc_and_test - increment and test
  * @v: pointer to type atomic64_t
@@ -259,6 +443,13 @@  static inline int atomic64_inc_and_test(atomic64_t *v)
 	return atomic64_inc_return(v) == 0;
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline int atomic64_inc_and_test_wrap(atomic64_wrap_t *v)
+{
+	return atomic64_inc_return_wrap(v) == 0;
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic64_add_negative - add and test if negative
  * @i: integer value to add
@@ -273,6 +464,13 @@  static inline int atomic64_add_negative(long long i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline int atomic64_add_negative_wrap(long long i, atomic64_wrap_t *v)
+{
+	return atomic64_add_return_wrap(i, v) < 0;
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 /**
  * atomic64_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
@@ -292,7 +490,6 @@  static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	return (int)a;
 }
 
-
 static inline int atomic64_inc_not_zero(atomic64_t *v)
 {
 	int r;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 89ed2f6..d8d3a3d 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -22,6 +22,18 @@  static inline long atomic64_read(const atomic64_t *v)
 }
 
 /**
+ * atomic64_read_wrap - read atomic64 variable
+ * @v: pointer of type atomic64_wrap_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+static inline long atomic64_read_wrap(const atomic64_wrap_t *v)
+{
+	return ACCESS_ONCE((v)->counter);
+}
+
+/**
  * atomic64_set - set atomic64 variable
  * @v: pointer to type atomic64_t
  * @i: required value
@@ -34,6 +46,18 @@  static inline void atomic64_set(atomic64_t *v, long i)
 }
 
 /**
+ * atomic64_set_wrap - set atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic64_set_wrap(atomic64_wrap_t *v, long i)
+{
+	v->counter = i;
+}
+
+/**
  * atomic64_add - add integer to atomic64 variable
  * @i: integer value to add
  * @v: pointer to type atomic64_t
@@ -42,12 +66,55 @@  static inline void atomic64_set(atomic64_t *v, long i)
  */
 static __always_inline void atomic64_add(long i, atomic64_t *v)
 {
+	asm volatile(LOCK_PREFIX "addq %1,%0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "subq %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "=m" (v->counter)
+		     : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_add_wrap - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __always_inline void atomic64_add_wrap(long i, atomic64_wrap_t *v)
+{
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
 		     : "er" (i), "m" (v->counter));
 }
 
 /**
+ * atomic64_add_and_test - add value from variable and test result
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool atomic64_add_and_test(long i, atomic64_t *v)
+{
+	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+}
+
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline bool atomic64_add_and_test_wrap(long i, atomic64_wrap_t *v)
+{
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addq", v->counter, "er", i, "%0", e);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
+/**
  * atomic64_sub - subtract the atomic64 variable
  * @i: integer value to subtract
  * @v: pointer to type atomic64_t
@@ -56,6 +123,26 @@  static __always_inline void atomic64_add(long i, atomic64_t *v)
  */
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
+	asm volatile(LOCK_PREFIX "subq %1,%0\n"
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "addq %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "=m" (v->counter)
+		     : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub_wrap - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic64_sub_wrap(long i, atomic64_wrap_t *v)
+{
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
 		     : "er" (i), "m" (v->counter));
@@ -72,7 +159,21 @@  static inline void atomic64_sub(long i, atomic64_t *v)
  */
 static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", e);
+}
+
+/**
+ * atomic64_sub_and_test_wrap - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool atomic64_sub_and_test_wrap(long i, atomic64_wrap_t *v)
+{
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
 
 /**
@@ -83,6 +184,26 @@  static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
  */
 static __always_inline void atomic64_inc(atomic64_t *v)
 {
+	asm volatile(LOCK_PREFIX "incq %0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "decq %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
+}
+
+/**
+ * atomic64_inc_wrap - increment atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __always_inline void atomic64_inc_wrap(atomic64_wrap_t *v)
+{
 	asm volatile(LOCK_PREFIX "incq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
@@ -96,6 +217,26 @@  static __always_inline void atomic64_inc(atomic64_t *v)
  */
 static __always_inline void atomic64_dec(atomic64_t *v)
 {
+	asm volatile(LOCK_PREFIX "decq %0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX "incq %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec_wrap - decrement atomic64 variable
+ * @v: pointer to type atomic64_wrap_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static __always_inline void atomic64_dec_wrap(atomic64_wrap_t *v)
+{
 	asm volatile(LOCK_PREFIX "decq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
@@ -111,8 +252,15 @@  static __always_inline void atomic64_dec(atomic64_t *v)
  */
 static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
+	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", e);
+}
+
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline bool atomic64_dec_and_test_wrap(atomic64_wrap_t *v)
+{
+	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
+#endif /* CONFIG_HARDENED_ATOMIC */
 
 /**
  * atomic64_inc_and_test - increment and test
@@ -124,8 +272,15 @@  static inline bool atomic64_dec_and_test(atomic64_t *v)
  */
 static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
+	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", e);
+}
+
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline bool atomic64_inc_and_test_wrap(atomic64_wrap_t *v)
+{
+	GEN_UNARY_RMWcc_wrap(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
+#endif /* CONFIG_HARDENED_ATOMIC */
 
 /**
  * atomic64_add_negative - add and test if negative
@@ -138,8 +293,15 @@  static inline bool atomic64_inc_and_test(atomic64_t *v)
  */
 static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
+	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", s);
+}
+
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline bool atomic64_add_negative_wrap(long i, atomic64_wrap_t *v)
+{
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
+#endif /* CONFIG_HARDENED_ATOMIC */
 
 /**
  * atomic64_add_return - add and return
@@ -150,6 +312,11 @@  static inline bool atomic64_add_negative(long i, atomic64_t *v)
  */
 static __always_inline long atomic64_add_return(long i, atomic64_t *v)
 {
+	return i + xadd_check_overflow(&v->counter, i);
+}
+
+static __always_inline long atomic64_add_return_wrap(long i, atomic64_wrap_t *v)
+{
 	return i + xadd(&v->counter, i);
 }
 
@@ -158,6 +325,13 @@  static inline long atomic64_sub_return(long i, atomic64_t *v)
 	return atomic64_add_return(-i, v);
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline long atomic64_sub_return_wrap(long i, atomic64_wrap_t *v)
+{
+	return atomic64_add_return_wrap(-i, v);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 static inline long atomic64_fetch_add(long i, atomic64_t *v)
 {
 	return xadd(&v->counter, i);
@@ -171,16 +345,29 @@  static inline long atomic64_fetch_sub(long i, atomic64_t *v)
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
+#define atomic64_inc_return_wrap(v)  (atomic64_add_return_wrap(1, (v)))
+#define atomic64_dec_return_wrap(v)  (atomic64_sub_return_wrap(1, (v)))
+
 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
 	return cmpxchg(&v->counter, old, new);
 }
 
+static inline long atomic64_cmpxchg_wrap(atomic64_wrap_t *v, long old, long new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
 	return xchg(&v->counter, new);
 }
 
+static inline long atomic64_xchg_wrap(atomic64_wrap_t *v, long new)
+{
+	return xchg(&v->counter, new);
+}
+
 /**
  * atomic64_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
@@ -192,11 +379,21 @@  static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
+	long c, old, new;
 	c = atomic64_read(v);
 	for (;;) {
 		if (unlikely(c == (u)))
 			break;
+		asm volatile("add %2,%0\n"
+#ifdef CONFIG_HARDENED_ATOMIC
+			     "jno 0f\n"
+			     "sub %2,%0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+			     : "=r" (new)
+			     : "0" (c), "ir" (a));
+
 		old = atomic64_cmpxchg((v), c, c + (a));
 		if (likely(old == c))
 			break;
@@ -205,6 +402,27 @@  static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 	return c != (u);
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
+static inline bool atomic64_add_unless_wrap(atomic64_wrap_t *v, long a, long u)
+{
+	long c, old, new;
+	c = atomic64_read_wrap(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		asm volatile("add %2,%0\n"
+			     : "=r" (new)
+			     : "0" (c), "ir" (a));
+
+		old = atomic64_cmpxchg_wrap((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /*
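
To make the intended semantics of the 64-bit ops above concrete: the protected atomic64_add() undoes the addition and raises int $4 (X86_TRAP_OF) when the signed result overflows, while atomic64_add_wrap() keeps the historical two's-complement wrap-around. A minimal userspace sketch of the same policy, using GCC __atomic builtins instead of the LOCK-prefixed asm; the function names and the abort() report are illustrative only, not part of this patch:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Models the hardened atomic64_add(): add, and if the signed result
 * overflowed, put the counter back and report (the kernel BUG()s via
 * int $4 instead of calling abort()). */
static void checked_add64(long *counter, long i)
{
	long old = __atomic_fetch_add(counter, i, __ATOMIC_RELAXED);
	long res;

	if (__builtin_add_overflow(old, i, &res)) {
		__atomic_fetch_sub(counter, i, __ATOMIC_RELAXED);
		fprintf(stderr, "overflow: counter pinned at %ld\n",
			__atomic_load_n(counter, __ATOMIC_RELAXED));
		abort();
	}
}

/* Models atomic64_add_wrap(): plain wrapping addition. */
static void wrapping_add64(long *counter, long i)
{
	__atomic_fetch_add(counter, i, __ATOMIC_RELAXED);
}

int main(void)
{
	long c = LONG_MAX;

	wrapping_add64(&c, 1);	/* silently wraps to LONG_MIN */
	printf("wrap variant: %ld\n", c);

	c = LONG_MAX;
	checked_add64(&c, 1);	/* undoes the add and reports */
	return 0;
}

As with the asm above, the check-and-undo leaves a short window in which another CPU can observe the overflowed value; the goal is detection of the first overflow, not a wait-free saturating counter.
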
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 68557f52..e25eb0d 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -50,7 +50,7 @@ 
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)			(1 << ((nr) & 7))
 
 /**
@@ -203,7 +203,7 @@  static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -249,7 +249,7 @@  static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
  */
 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -302,7 +302,7 @@  static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
  */
 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
+	GEN_BINARY_RMWcc_wrap(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
 }
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
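
The bitops conversion goes the other way: bts/btr/btc report their result through CF and leave OF undefined, so an overflow check would be meaningless there, and test_and_set_bit() and friends are simply rerouted to the unchecked _wrap generator. Callers are unaffected; a trivial kernel-side sketch (the bitmap and function names are hypothetical):

#include <linux/bitops.h>

static unsigned long claimed[BITS_TO_LONGS(64)];

/* test_and_set_bit() keeps its signature and behaviour; only the RMWcc
 * generator behind it switched to the unchecked _wrap variant. */
static bool claim_slot(unsigned int slot)
{
	return !test_and_set_bit(slot, claimed);
}
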
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 9733361..b83f612 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -13,10 +13,14 @@  extern void __xchg_wrong_size(void)
 	__compiletime_error("Bad argument size for xchg");
 extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
+extern void __xadd_check_overflow_wrong_size(void)
+	__compiletime_error("Bad argument size for xadd_check_overflow");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
 extern void __add_wrong_size(void)
 	__compiletime_error("Bad argument size for add");
+extern void __add_check_overflow_wrong_size(void)
+	__compiletime_error("Bad argument size for add_check_overflow");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size is set to
@@ -68,6 +72,38 @@  extern void __add_wrong_size(void)
 		__ret;							\
 	})
 
+#ifdef CONFIG_HARDENED_ATOMIC
+#define __xchg_op_check_overflow(ptr, arg, op, lock)			\
+	({								\
+	        __typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      "jno 0f\n"			\
+				      "mov %0,%1\n"			\
+				      "int $4\n0:\n"			\
+				      _ASM_EXTABLE(0b, 0b)		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      "jno 0f\n"			\
+				      "mov %0,%1\n"			\
+				      "int $4\n0:\n"			\
+				      _ASM_EXTABLE(0b, 0b)		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _check_overflow_wrong_size();	\
+		}							\
+		__ret;							\
+	})
+#else
+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
+#endif
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
@@ -166,6 +202,9 @@  extern void __add_wrong_size(void)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __xadd_check_overflow(ptr, inc, lock)	__xchg_op_check_overflow((ptr), (inc), xadd, lock)
+#define xadd_check_overflow(ptr, inc)		__xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
+
 #define __add(ptr, inc, lock)						\
 	({								\
 	        __typeof__ (*(ptr)) __ret = (inc);			\
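
xadd_check_overflow() above is what the protected add-and-return paths use: a LOCK xadd, then jno, and on overflow a store of the pre-add value back to memory followed by int $4. Its contract is "fetch-and-add unless the signed result overflows"; a hedged C model of that contract, written as a compare-exchange loop with __builtin_add_overflow (illustrative only: the model refuses the add before it becomes visible, whereas the asm detects and undoes it afterwards; the end state is the same):

#include <stdlib.h>

/* Model of xadd_check_overflow(ptr, inc): return the old value after
 * atomically adding inc, but never let the signed result overflow.
 * The kernel raises int $4 (X86_TRAP_OF) here; the model aborts. */
static long xadd_check_overflow_model(long *ptr, long inc)
{
	long old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
	long new;

	do {
		if (__builtin_add_overflow(old, inc, &new))
			abort();
	} while (!__atomic_compare_exchange_n(ptr, &old, new, 0,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
	return old;
}

atomic64_add_return() in the hunk further up then simply becomes "return i + xadd_check_overflow(&v->counter, i)".
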
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 7511978..46cfaf0 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -10,25 +10,69 @@  typedef struct {
 	atomic_long_t a;
 } local_t;
 
+typedef struct {
+	atomic_long_wrap_t a;
+} local_wrap_t;
+
 #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)	atomic_long_read(&(l)->a)
+#define local_read_wrap(l)	atomic_long_read_wrap(&(l)->a)
 #define local_set(l, i)	atomic_long_set(&(l)->a, (i))
+#define local_set_wrap(l, i)	atomic_long_set_wrap(&(l)->a, (i))
 
 static inline void local_inc(local_t *l)
 {
+	asm volatile(_ASM_INC "%0\n"
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     _ASM_DEC "%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "+m" (l->a.counter));
+}
+
+static inline void local_inc_wrap(local_wrap_t *l)
+{
 	asm volatile(_ASM_INC "%0"
 		     : "+m" (l->a.counter));
 }
 
 static inline void local_dec(local_t *l)
 {
+	asm volatile(_ASM_DEC "%0\n"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     _ASM_INC "%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "+m" (l->a.counter));
+}
+
+static inline void local_dec_wrap(local_wrap_t *l)
+{
 	asm volatile(_ASM_DEC "%0"
 		     : "+m" (l->a.counter));
 }
 
 static inline void local_add(long i, local_t *l)
 {
+	asm volatile(_ASM_ADD "%1,%0\n"
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     _ASM_SUB "%1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
+}
+
+static inline void local_add_wrap(long i, local_wrap_t *l)
+{
 	asm volatile(_ASM_ADD "%1,%0"
 		     : "+m" (l->a.counter)
 		     : "ir" (i));
@@ -36,6 +80,19 @@  static inline void local_add(long i, local_t *l)
 
 static inline void local_sub(long i, local_t *l)
 {
+	asm volatile(_ASM_SUB "%1,%0\n"
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     _ASM_ADD "%1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
+}
+
+static inline void local_sub_wrap(long i, local_wrap_t *l)
+{
 	asm volatile(_ASM_SUB "%1,%0"
 		     : "+m" (l->a.counter)
 		     : "ir" (i));
@@ -52,7 +109,7 @@  static inline void local_sub(long i, local_t *l)
  */
 static inline bool local_sub_and_test(long i, local_t *l)
 {
-	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
+	GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", e);
 }
 
 /**
@@ -65,7 +122,7 @@  static inline bool local_sub_and_test(long i, local_t *l)
  */
 static inline bool local_dec_and_test(local_t *l)
 {
-	GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
+	GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", e);
 }
 
 /**
@@ -78,7 +135,7 @@  static inline bool local_dec_and_test(local_t *l)
  */
 static inline bool local_inc_and_test(local_t *l)
 {
-	GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
+	GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", e);
 }
 
 /**
@@ -92,7 +149,7 @@  static inline bool local_inc_and_test(local_t *l)
  */
 static inline bool local_add_negative(long i, local_t *l)
 {
-	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
+	GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", s);
 }
 
 /**
@@ -105,6 +162,28 @@  static inline bool local_add_negative(long i, local_t *l)
 static inline long local_add_return(long i, local_t *l)
 {
 	long __i = i;
+	asm volatile(_ASM_XADD "%0, %1\n"
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     _ASM_MOV "%0,%1\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+		     : "+r" (i), "+m" (l->a.counter)
+		     : : "memory");
+	return i + __i;
+}
+
+/**
+ * local_add_return_wrap - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_wrap_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static inline long local_add_return_wrap(long i, local_wrap_t *l)
+{
+	long __i = i;
 	asm volatile(_ASM_XADD "%0, %1;"
 		     : "+r" (i), "+m" (l->a.counter)
 		     : : "memory");
@@ -121,6 +200,8 @@  static inline long local_sub_return(long i, local_t *l)
 
 #define local_cmpxchg(l, o, n) \
 	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_wrap(l, o, n) \
+	(cmpxchg_local(&((l)->a.counter), (o), (n)))
 /* Always has a lock prefix */
 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
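
local_t follows the same split as the atomics: the default local_* operations now trap on overflow, and local_wrap_t plus the *_wrap accessors above are the opt-out for counters that may legitimately wrap. A hedged usage sketch of such a caller (the per-CPU counter and functions are hypothetical):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_wrap_t, evt_count);

/* Called with preemption disabled; the counter is allowed to wrap, so
 * under HARDENED_ATOMIC it must use the _wrap type and accessors. */
static void evt_tick(void)
{
	local_inc_wrap(this_cpu_ptr(&evt_count));
}

static long evt_read(int cpu)
{
	return local_read_wrap(per_cpu_ptr(&evt_count, cpu));
}
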
 
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 17f2186..2fa0e84 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -81,7 +81,7 @@  static __always_inline void __preempt_count_sub(int val)
  */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+	GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), e);
 }
 
 /*
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 661dd30..0375d3f 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -5,28 +5,80 @@ 
 
 /* Use asm goto */
 
-#define __GEN_RMWcc(fullop, var, cc, ...)				\
+#ifdef CONFIG_HARDENED_ATOMIC
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
 do {									\
-	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"		\
+	asm_volatile_goto (fullop					\
+			";jno 0f\n"					\
+			fullantiop					\
+			";int $4\n0:\n"					\
+			_ASM_EXTABLE(0b, 0b)				\
+			 ";j" #cc " %l[cc_label]"			\
 			: : "m" (var), ## __VA_ARGS__ 			\
 			: "memory" : cc_label);				\
 	return 0;							\
 cc_label:								\
 	return 1;							\
 } while (0)
+#else
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
+do {									\
+	asm_volatile_goto (fullop ";j" #cc " %l[cc_label]"		\
+			: : "m" (var), ## __VA_ARGS__ 			\
+			: "memory" : cc_label);				\
+	return 0;							\
+cc_label:								\
+	return 1;							\
+} while (0)
+#endif
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) 				\
-	__GEN_RMWcc(op " " arg0, var, cc)
-
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
-	__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+#define __GEN_RMWcc_wrap(fullop, var, cc, ...)				\
+do {									\
+	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"		\
+			: : "m" (var), ## __VA_ARGS__ 			\
+			: "memory" : cc_label);				\
+	return 0;							\
+cc_label:								\
+	return 1;							\
+} while (0)
 
+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) 			\
+	__GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
+#define GEN_UNARY_RMWcc_wrap(op, var, arg0, cc) 			\
+	__GEN_RMWcc_wrap(op " " arg0, var, cc)
+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)		\
+	__GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
+#define GEN_BINARY_RMWcc_wrap(op, var, vcon, val, arg0, cc)	\
+	__GEN_RMWcc_wrap(op " %1, " arg0, var, cc, vcon (val))
 #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
 
 /* Use flags output or a set instruction */
 
-#define __GEN_RMWcc(fullop, var, cc, ...)				\
+#ifdef CONFIG_HARDENED_ATOMIC
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
 do {									\
+	char c;								\
+	asm volatile (fullop 						\
+			";jno 0f\n"					\
+			fullantiop					\
+			";int $4\n0:\n"					\
+			_ASM_EXTABLE(0b, 0b)				\
+			";" CC_SET(cc)				\
+			: "+m" (var), CC_OUT(cc) (c)			\
+			: __VA_ARGS__ : "memory");			\
+	return c != 0;							\
+} while (0)
+#else
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
+do {									\
+	char c;								\
+	asm volatile (fullop ";" CC_SET(cc)				\
+			: "+m" (var), CC_OUT(cc) (c)			\
+			: __VA_ARGS__ : "memory");			\
+	return c != 0;							\
+} while (0)
+#endif
+
+#define __GEN_RMWcc_wrap(fullop, var, cc, ...)				\
+do {									\
 	bool c;								\
 	asm volatile (fullop ";" CC_SET(cc)				\
 			: "+m" (var), CC_OUT(cc) (c)			\
@@ -34,12 +86,14 @@  do {									\
 	return c;							\
 } while (0)
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc)				\
-	__GEN_RMWcc(op " " arg0, var, cc)
-
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
-	__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-
+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)			\
+	__GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
+#define GEN_UNARY_RMWcc_wrap(op, var, arg0, cc)			\
+	__GEN_RMWcc_wrap(op " " arg0, var, cc)
+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)		\
+	__GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
+#define GEN_BINARY_RMWcc_wrap(op, var, vcon, val, arg0, cc)	\
+	__GEN_RMWcc_wrap(op " %2, " arg0, var, cc, vcon (val))
 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
 
 #endif /* _ASM_X86_RMWcc */
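
Because the new antiop argument is pasted straight into the asm template, the checked generators above carry the undo-and-trap sequence themselves. Reconstructed from the asm-goto variant of __GEN_RMWcc(), the atomic64_dec_and_test() call shown earlier, GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", e), expands to roughly the following (kernel context assumed; whitespace and comments are cosmetic):

static inline bool atomic64_dec_and_test(atomic64_t *v)
{
	asm_volatile_goto(LOCK_PREFIX "decq %0"		/* op  arg0          */
			  ";jno 0f\n"			/* no overflow: go on */
			  LOCK_PREFIX "incq %0"		/* undo the decrement */
			  ";int $4\n0:\n"		/* raise X86_TRAP_OF  */
			  _ASM_EXTABLE(0b, 0b)
			  ";je %l[cc_label]"		/* j<cc> on result    */
			  : : "m" (v->counter)
			  : "memory" : cc_label);
	return 0;
cc_label:
	return 1;
}
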
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 3d33a71..4d3f8a5 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -64,6 +64,14 @@  static inline void __down_read(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning down_read\n\t"
 		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX _ASM_DEC "(%1)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* adds 0x00000001 */
 		     "  jns        1f\n"
 		     "  call call_rwsem_down_read_failed\n"
@@ -85,6 +93,14 @@  static inline bool __down_read_trylock(struct rw_semaphore *sem)
 		     "1:\n\t"
 		     "  mov          %1,%2\n\t"
 		     "  add          %3,%2\n\t"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     "sub %3,%2\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     "  jle	     2f\n\t"
 		     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
 		     "  jnz	     1b\n\t"
@@ -99,12 +115,22 @@  static inline bool __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
+#ifdef CONFIG_HARDENED_ATOMIC
+#define ____down_write_undo \
+		     "jno 0f\n"\
+		     "mov %1,(%2)\n"\
+		     "int $4\n0:\n"\
+		     _ASM_EXTABLE(0b, 0b)
+#else
+#define ____down_write_undo
+#endif
 #define ____down_write(sem, slow_path)			\
 ({							\
 	long tmp;					\
 	struct rw_semaphore* ret;			\
 	asm volatile("# beginning down_write\n\t"	\
 		     LOCK_PREFIX "  xadd      %1,(%3)\n\t"	\
+		     ____down_write_undo		\
 		     /* adds 0xffff0001, returns the old value */ \
 		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
 		     /* was the active mask 0 before? */\
@@ -166,6 +192,14 @@  static inline void __up_read(struct rw_semaphore *sem)
 	long tmp;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     "mov %1,(%2)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* subtracts 1, returns the old value */
 		     "  jns        1f\n\t"
 		     "  call call_rwsem_wake\n" /* expects old value in %edx */
@@ -184,6 +218,14 @@  static inline void __up_write(struct rw_semaphore *sem)
 	long tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     "mov %1,(%2)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* subtracts 0xffff0001, returns the old value */
 		     "  jns        1f\n\t"
 		     "  call call_rwsem_wake\n" /* expects old value in %edx */
@@ -201,6 +243,14 @@  static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning __downgrade_write\n\t"
 		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+
+#ifdef CONFIG_HARDENED_ATOMIC
+		     "jno 0f\n"
+		     LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /*
 		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
 		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index bd4e3d4..d67a914 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -191,6 +191,10 @@  do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 			tsk->thread.trap_nr = trapnr;
 			die(str, regs, error_code);
 		}
+
+		if (trapnr == X86_TRAP_OF)
+			hardened_atomic_overflow(regs);
+
 		return 0;
 	}
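
This is the glue that turns the int $4 instructions above (and the into instructions in the 32-bit assembly below) into a report: #OF arrives as X86_TRAP_OF and do_trap_no_signal() hands the register state to hardened_atomic_overflow(). That helper comes from the generic HARDENED_ATOMIC patch earlier in this series and is not part of this x86 patch; going by the commit message (report, then BUG()), a minimal sketch of it might look like this, purely for illustration:

/* Hypothetical sketch; the real helper lives in the generic patch. */
void hardened_atomic_overflow(struct pt_regs *regs)
{
	pr_emerg("HARDENED_ATOMIC: overflow detected in %s:%d, uid/euid: %u/%u\n",
		 current->comm, task_pid_nr(current),
		 from_kuid_munged(&init_user_ns, current_uid()),
		 from_kuid_munged(&init_user_ns, current_euid()));
	BUG();
}
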
 
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 9b0ca8f..0e8a888 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -45,6 +45,10 @@  BEGIN(read)
 	movl  (v), %eax
 	movl 4(v), %edx
 RET_ENDP
+BEGIN(read_wrap)
+	movl  (v), %eax
+	movl 4(v), %edx
+RET_ENDP
 #undef v
 
 #define v %esi
@@ -52,6 +56,10 @@  BEGIN(set)
 	movl %ebx,  (v)
 	movl %ecx, 4(v)
 RET_ENDP
+BEGIN(set_wrap)
+	movl %ebx,  (v)
+	movl %ecx, 4(v)
+RET_ENDP
 #undef v
 
 #define v  %esi
@@ -67,6 +75,18 @@  RET_ENDP
 BEGIN(add)
 	addl %eax,  (v)
 	adcl %edx, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+	jno 0f
+	subl %eax,  (v)
+	sbbl %edx, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+RET_ENDP
+BEGIN(add_wrap)
+	addl %eax,  (v)
+	adcl %edx, 4(v)
 RET_ENDP
 #undef v
 
@@ -74,6 +94,20 @@  RET_ENDP
 BEGIN(add_return)
 	addl  (v), %eax
 	adcl 4(v), %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+	movl %eax,  (v)
+	movl %edx, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+2:
+#endif
+RET_ENDP
+BEGIN(add_return_wrap)
+	addl  (v), %eax
+	adcl 4(v), %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -83,6 +117,18 @@  RET_ENDP
 BEGIN(sub)
 	subl %eax,  (v)
 	sbbl %edx, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+	jno 0f
+	addl %eax,  (v)
+	adcl %edx, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+RET_ENDP
+BEGIN(sub_wrap)
+	subl %eax,  (v)
+	sbbl %edx, 4(v)
 RET_ENDP
 #undef v
 
@@ -93,6 +139,23 @@  BEGIN(sub_return)
 	sbbl $0, %edx
 	addl  (v), %eax
 	adcl 4(v), %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+	movl %eax,  (v)
+	movl %edx, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+2:
+#endif
+RET_ENDP
+BEGIN(sub_return_wrap)
+	negl %edx
+	negl %eax
+	sbbl $0, %edx
+	addl  (v), %eax
+	adcl 4(v), %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -102,6 +165,19 @@  RET_ENDP
 BEGIN(inc)
 	addl $1,  (v)
 	adcl $0, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+	jno 0f
+	subl $1,  (v)
+	sbbl $0, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(inc_wrap)
+	addl $1,  (v)
+	adcl $0, 4(v)
 RET_ENDP
 #undef v
 
@@ -111,6 +187,22 @@  BEGIN(inc_return)
 	movl 4(v), %edx
 	addl $1, %eax
 	adcl $0, %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+	movl %eax,  (v)
+	movl %edx, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+2:
+#endif
+RET_ENDP
+BEGIN(inc_return_wrap)
+	movl  (v), %eax
+	movl 4(v), %edx
+	addl $1, %eax
+	adcl $0, %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -120,6 +212,18 @@  RET_ENDP
 BEGIN(dec)
 	subl $1,  (v)
 	sbbl $0, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+	jno 0f
+	addl $1,  (v)
+	adcl $0, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+RET_ENDP
+BEGIN(dec_wrap)
+	subl $1,  (v)
+	sbbl $0, 4(v)
 RET_ENDP
 #undef v
 
@@ -129,6 +233,22 @@  BEGIN(dec_return)
 	movl 4(v), %edx
 	subl $1, %eax
 	sbbl $0, %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+	movl %eax,  (v)
+	movl %edx, 4(v)
+#ifdef CONFIG_HARDENED_ATOMIC
+2:
+#endif
+RET_ENDP
+BEGIN(dec_return_wrap)
+	movl  (v), %eax
+	movl 4(v), %edx
+	subl $1, %eax
+	sbbl $0, %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -140,6 +260,11 @@  BEGIN(add_unless)
 	adcl %edx, %edi
 	addl  (v), %eax
 	adcl 4(v), %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
 	cmpl %eax, %ecx
 	je 3f
 1:
@@ -165,6 +290,11 @@  BEGIN(inc_not_zero)
 1:
 	addl $1, %eax
 	adcl $0, %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
 	movl %eax,  (v)
 	movl %edx, 4(v)
 	movl $1, %eax
@@ -183,6 +313,11 @@  BEGIN(dec_if_positive)
 	movl 4(v), %edx
 	subl $1, %eax
 	sbbl $0, %edx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 1f)
+#endif
 	js 1f
 	movl %eax,  (v)
 	movl %edx, 4(v)
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index db3ae854..5bd864e 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -22,9 +22,19 @@ 
 
 ENTRY(atomic64_read_cx8)
 	read64 %ecx
+	/* PaX has pax_force_retaddr here;
+	 * do we want something similar? If yes, changes
+	 * have to be made in more places below. */
 	ret
 ENDPROC(atomic64_read_cx8)
 
+ENTRY(atomic64_read_wrap_cx8)
+	read64 %ecx
+/* do we want something like the line below?
+ *	pax_force_retaddr */
+	ret
+ENDPROC(atomic64_read_wrap_cx8)
+
 ENTRY(atomic64_set_cx8)
 1:
 /* we don't need LOCK_PREFIX since aligned 64-bit writes
@@ -35,6 +45,17 @@  ENTRY(atomic64_set_cx8)
 	ret
 ENDPROC(atomic64_set_cx8)
 
+ENTRY(atomic64_set_wrap_cx8)
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
+	cmpxchg8b (%esi)
+	jne 1b
+
+	/* pax_force_retaddr */
+	ret
+ENDPROC(atomic64_set_wrap_cx8)
+
 ENTRY(atomic64_xchg_cx8)
 1:
 	LOCK_PREFIX
@@ -44,8 +65,8 @@  ENTRY(atomic64_xchg_cx8)
 	ret
 ENDPROC(atomic64_xchg_cx8)
 
-.macro addsub_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro addsub_return func ins insc wrap=""
+ENTRY(atomic64_\func\()_return\wrap\()_cx8)
 	pushl %ebp
 	pushl %ebx
 	pushl %esi
@@ -61,6 +82,13 @@  ENTRY(atomic64_\func\()_return_cx8)
 	movl %edx, %ecx
 	\ins\()l %esi, %ebx
 	\insc\()l %edi, %ecx
+#ifdef CONFIG_HARDENED_ATOMIC
+.ifb \wrap
+	into
+2:
+	_ASM_EXTABLE(2b, 3f)
+.endif
+#endif
 	LOCK_PREFIX
 	cmpxchg8b (%ebp)
 	jne 1b
@@ -68,19 +96,27 @@  ENTRY(atomic64_\func\()_return_cx8)
 10:
 	movl %ebx, %eax
 	movl %ecx, %edx
+
+.ifb \wrap
+#ifdef CONFIG_HARDENED_ATOMIC
+3:
+#endif
+.endif
 	popl %edi
 	popl %esi
 	popl %ebx
 	popl %ebp
 	ret
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\wrap\()_cx8)
 .endm
 
 addsub_return add add adc
 addsub_return sub sub sbb
+addsub_return add add adc _wrap
+addsub_return sub sub sbb _wrap
 
-.macro incdec_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro incdec_return func ins insc wrap=""
+ENTRY(atomic64_\func\()_return\wrap\()_cx8)
 	pushl %ebx
 
 	read64 %esi
@@ -89,6 +125,13 @@  ENTRY(atomic64_\func\()_return_cx8)
 	movl %edx, %ecx
 	\ins\()l $1, %ebx
 	\insc\()l $0, %ecx
+#ifdef CONFIG_HARDENED_ATOMIC
+.ifb \wrap
+	into
+2:
+	_ASM_EXTABLE(2b, 3f)
+.endif
+#endif
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
@@ -96,13 +139,21 @@  ENTRY(atomic64_\func\()_return_cx8)
 10:
 	movl %ebx, %eax
 	movl %ecx, %edx
+
+.ifb \wrap
+#ifdef CONFIG_HARDENED_ATOMIC
+3:
+#endif
+.endif
 	popl %ebx
 	ret
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\wrap\()_cx8)
 .endm
 
 incdec_return inc add adc
 incdec_return dec sub sbb
+incdec_return inc add adc _wrap
+incdec_return dec sub sbb _wrap
 
 ENTRY(atomic64_dec_if_positive_cx8)
 	pushl %ebx
@@ -113,6 +164,11 @@  ENTRY(atomic64_dec_if_positive_cx8)
 	movl %edx, %ecx
 	subl $1, %ebx
 	sbb $0, %ecx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
 	js 2f
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
@@ -144,6 +200,11 @@  ENTRY(atomic64_add_unless_cx8)
 	movl %edx, %ecx
 	addl %ebp, %ebx
 	adcl %edi, %ecx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 3f)
+#endif
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
@@ -173,6 +234,11 @@  ENTRY(atomic64_inc_not_zero_cx8)
 	xorl %ecx, %ecx
 	addl $1, %ebx
 	adcl %edx, %ecx
+#ifdef CONFIG_HARDENED_ATOMIC
+	into
+1234:
+	_ASM_EXTABLE(1234b, 3f)
+#endif
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b