| Message ID | 1476472255-15851-1-git-send-email-colin@cvidal.org (mailing list archive) |
| --- | --- |
| State | New, archived |
On Fri, 2016-10-14 at 21:10 +0200, Colin Vidal wrote:
> Hi Elena,
>
> I don't know if it may help for the v2, but here are the (tiny)
> modifications I have made to the generic part in order to build on ARM
> without CONFIG_HARDENED_ATOMIC (of course, there are also some
> modifications in arch/arm, but that is not the subject here, and it is
> only a prototype for now). It does not break x86 builds.
>
> It is basically a reordering of / guard around the atomic_*_wrap macros
> in order to avoid "implicitly defined" / "redefined" errors about them
> when CONFIG_HARDENED_ATOMIC is unset.
>
> Thanks,
>
> Colin
>
> ---
>  include/asm-generic/atomic-long.h | 43 ++++++++++++++++++-------------------------
>  1 file changed, 18 insertions(+), 25 deletions(-)
>
> diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
> index 790cb00..6f1dc3e 100644
> --- a/include/asm-generic/atomic-long.h
> +++ b/include/asm-generic/atomic-long.h
> @@ -46,6 +46,22 @@ typedef atomic_t atomic_long_wrap_t;
>
>  #endif
>
> +#ifndef CONFIG_HARDENED_ATOMIC
> +#define atomic_read_wrap(v) atomic_read(v)
> +#define atomic_set_wrap(v, i) atomic_set((v), (i))
> +#define atomic_add_wrap(i, v) atomic_add((i), (v))
> +#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
> +#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
> +#define atomic_inc_wrap(v) atomic_inc(v)
> +#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
> +#define atomic_inc_return_wrap(v) atomic_inc_return(v)
> +#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
> +#define atomic_dec_wrap(v) atomic_dec(v)
> +#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
> +#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
> +#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test(i, v)
> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  #define ATOMIC_LONG_READ_OP(mo, suffix) \
>  static inline long atomic_long_read##mo##suffix(const atomic_long##suffix##_t *l)\
>  { \
> @@ -233,12 +249,14 @@ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
>  	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
>  }
>
> +#ifdef CONFIG_HARDENED_ATOMIC
>  static inline int atomic_long_sub_and_test_wrap(long i, atomic_long_wrap_t *l)
>  {
>  	ATOMIC_LONG_PFX(_wrap_t) *v = (ATOMIC_LONG_PFX(_wrap_t) *)l;
>
>  	return ATOMIC_LONG_PFX(_sub_and_test_wrap)(i, v);
>  }
> +#endif
>
>  static inline int atomic_long_dec_and_test(atomic_long_t *l)
>  {
> @@ -291,29 +309,4 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
>  #define atomic_long_inc_not_zero(l) \
>  	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
>
> -#ifndef CONFIG_HARDENED_ATOMIC
> -#define atomic_read_wrap(v) atomic_read(v)
> -#define atomic_set_wrap(v, i) atomic_set((v), (i))
> -#define atomic_add_wrap(i, v) atomic_add((i), (v))
> -#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
> -#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
> -#define atomic_inc_wrap(v) atomic_inc(v)
> -#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
> -#define atomic_inc_return_wrap(v) atomic_inc_return(v)
> -#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
> -#define atomic_dec_wrap(v) atomic_dec(v)
> -#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
> -#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
> -#define atomic_long_read_wrap(v) atomic_long_read(v)
> -#define atomic_long_set_wrap(v, i) atomic_long_set((v), (i))
> -#define atomic_long_add_wrap(i, v) atomic_long_add((i), (v))
> -#define atomic_long_sub_wrap(i, v) atomic_long_sub((i), (v))
> -#define atomic_long_inc_wrap(v) atomic_long_inc(v)
> -#define atomic_long_add_return_wrap(i, v) atomic_long_add_return((i), (v))
> -#define atomic_long_inc_return_wrap(v) atomic_long_inc_return(v)
> -#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test((i), (v))
> -#define atomic_long_dec_wrap(v) atomic_long_dec(v)
> -#define atomic_long_xchg_wrap(v, i) atomic_long_xchg((v), (i))
> -#endif /* CONFIG_HARDENED_ATOMIC */
> -
>  #endif /* _ASM_GENERIC_ATOMIC_LONG_H */

Argh. I actually had to make some modifications to it today... I will
resend a new version, together with an RFC of the arm port, as soon as I
finish it.

Colin
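The preprocessor idea behind the moved #ifndef block is simple: when the
hardening option is off, the *_wrap names are plain aliases for the
ordinary (wrapping) operations, and those aliases must be visible before
any generic helper that expands to calls on them. Below is a minimal,
self-contained sketch of that idea, using made-up names (HARDENED_ATOMIC,
my_atomic_t, my_*), not the kernel's own code:

/*
 * Sketch only: without the hardened implementation, the _wrap variants
 * are just the ordinary operations.  Putting the #defines before the
 * generic helper is what avoids "implicit declaration" errors.
 */
#include <stdio.h>

typedef struct { long counter; } my_atomic_t;

static inline void my_atomic_add(long i, my_atomic_t *v) { v->counter += i; }
static inline long my_atomic_read(const my_atomic_t *v) { return v->counter; }

#ifndef HARDENED_ATOMIC
#define my_atomic_add_wrap(i, v)  my_atomic_add((i), (v))
#define my_atomic_read_wrap(v)    my_atomic_read(v)
#endif

/* Generic helper that unconditionally uses the _wrap names. */
static inline long my_atomic_add_return_wrap(long i, my_atomic_t *v)
{
	my_atomic_add_wrap(i, v);
	return my_atomic_read_wrap(v);
}

int main(void)
{
	my_atomic_t a = { 0 };

	printf("%ld\n", my_atomic_add_return_wrap(3, &a)); /* prints 3 */
	return 0;
}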
Hi Colin,

On Fri, Oct 14, 2016 at 09:10:55PM +0200, Colin Vidal wrote:
> Hi Elena,
>
> I don't know if it may help for the v2, but here are the (tiny)
> modifications I have made to the generic part in order to build on ARM
> without CONFIG_HARDENED_ATOMIC (of course, there are also some
> modifications in arch/arm, but that is not the subject here, and it is
> only a prototype for now). It does not break x86 builds.

I saw the same problem when compiling on arm64, but

> It is basically a reordering of / guard around the atomic_*_wrap macros

I don't think that we need the re-ordering, and

> in order to avoid "implicitly defined" / "redefined" errors about them
> when CONFIG_HARDENED_ATOMIC is unset.
>
> Thanks,
>
> Colin
>
> ---
>  include/asm-generic/atomic-long.h | 43 ++++++++++++++++++-------------------------
>  1 file changed, 18 insertions(+), 25 deletions(-)
>
> diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
> index 790cb00..6f1dc3e 100644
> --- a/include/asm-generic/atomic-long.h
> +++ b/include/asm-generic/atomic-long.h
> @@ -46,6 +46,22 @@ typedef atomic_t atomic_long_wrap_t;
>
>  #endif
>
> +#ifndef CONFIG_HARDENED_ATOMIC
> +#define atomic_read_wrap(v) atomic_read(v)
> +#define atomic_set_wrap(v, i) atomic_set((v), (i))
> +#define atomic_add_wrap(i, v) atomic_add((i), (v))
> +#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
> +#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
> +#define atomic_inc_wrap(v) atomic_inc(v)
> +#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
> +#define atomic_inc_return_wrap(v) atomic_inc_return(v)
> +#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
> +#define atomic_dec_wrap(v) atomic_dec(v)
> +#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
> +#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
> +#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test(i, v)

removing this definition (atomic_long_sub_and_test_wrap) in
the original location and

> +#endif /* CONFIG_HARDENED_ATOMIC */
> +
>  #define ATOMIC_LONG_READ_OP(mo, suffix) \
>  static inline long atomic_long_read##mo##suffix(const atomic_long##suffix##_t *l)\
>  { \
> @@ -233,12 +249,14 @@ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
>  	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
>  }
>
> +#ifdef CONFIG_HARDENED_ATOMIC

removing this guard would work both for HARDENED_ATOMIC and
!HARDENED_ATOMIC.

Anyway, IMO, we should (and can) generate the _wrap versions of the
definitions in a more systematic way to cover whole functions.

Thanks,
-Takahiro AKASHI

>  static inline int atomic_long_sub_and_test_wrap(long i, atomic_long_wrap_t *l)
>  {
>  	ATOMIC_LONG_PFX(_wrap_t) *v = (ATOMIC_LONG_PFX(_wrap_t) *)l;
>
>  	return ATOMIC_LONG_PFX(_sub_and_test_wrap)(i, v);
>  }
> +#endif
>
>  static inline int atomic_long_dec_and_test(atomic_long_t *l)
>  {
> @@ -291,29 +309,4 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
>  #define atomic_long_inc_not_zero(l) \
>  	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
>
> -#ifndef CONFIG_HARDENED_ATOMIC
> -#define atomic_read_wrap(v) atomic_read(v)
> -#define atomic_set_wrap(v, i) atomic_set((v), (i))
> -#define atomic_add_wrap(i, v) atomic_add((i), (v))
> -#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
> -#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
> -#define atomic_inc_wrap(v) atomic_inc(v)
> -#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
> -#define atomic_inc_return_wrap(v) atomic_inc_return(v)
> -#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
> -#define atomic_dec_wrap(v) atomic_dec(v)
> -#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
> -#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
> -#define atomic_long_read_wrap(v) atomic_long_read(v)
> -#define atomic_long_set_wrap(v, i) atomic_long_set((v), (i))
> -#define atomic_long_add_wrap(i, v) atomic_long_add((i), (v))
> -#define atomic_long_sub_wrap(i, v) atomic_long_sub((i), (v))
> -#define atomic_long_inc_wrap(v) atomic_long_inc(v)
> -#define atomic_long_add_return_wrap(i, v) atomic_long_add_return((i), (v))
> -#define atomic_long_inc_return_wrap(v) atomic_long_inc_return(v)
> -#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test((i), (v))
> -#define atomic_long_dec_wrap(v) atomic_long_dec(v)
> -#define atomic_long_xchg_wrap(v, i) atomic_long_xchg((v), (i))
> -#endif /* CONFIG_HARDENED_ATOMIC */
> -
>  #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
> --
> 2.7.4
>
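One possible reading of the "more systematic" suggestion above, sketched
here with a hypothetical ATOMIC_LONG_SUB_AND_TEST_OP template (the real
asm-generic/atomic-long.h hand-writes these two functions; this is an
illustration, not code from the series), is to stamp out the plain and
the _wrap flavour of each helper from a single macro:

/*
 * Hypothetical sketch: one macro emits both atomic_long_sub_and_test()
 * and atomic_long_sub_and_test_wrap(), following the token-pasting style
 * already used by ATOMIC_LONG_READ_OP() in the same header.
 */
#define ATOMIC_LONG_SUB_AND_TEST_OP(suffix) \
static inline int atomic_long_sub_and_test##suffix(long i, atomic_long##suffix##_t *l) \
{ \
	ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l; \
\
	return ATOMIC_LONG_PFX(_sub_and_test##suffix)(i, v); \
}

ATOMIC_LONG_SUB_AND_TEST_OP()       /* atomic_long_sub_and_test()      */
#ifdef CONFIG_HARDENED_ATOMIC
ATOMIC_LONG_SUB_AND_TEST_OP(_wrap)  /* atomic_long_sub_and_test_wrap() */
#endif

Whether the _wrap instantiation sits behind CONFIG_HARDENED_ATOMIC, as
shown, or relies on the fallback #define aliases instead, is exactly the
design point being discussed in this thread.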
Hi Takahiro,

> > +#ifndef CONFIG_HARDENED_ATOMIC
> > +#define atomic_read_wrap(v) atomic_read(v)
> > +#define atomic_set_wrap(v, i) atomic_set((v), (i))
> > +#define atomic_add_wrap(i, v) atomic_add((i), (v))
> > +#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
> > +#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
> > +#define atomic_inc_wrap(v) atomic_inc(v)
> > +#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
> > +#define atomic_inc_return_wrap(v) atomic_inc_return(v)
> > +#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
> > +#define atomic_dec_wrap(v) atomic_dec(v)
> > +#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
> > +#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
> > +#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test(i, v)
>
> removing this definition (atomic_long_sub_and_test_wrap) in
> the original location and
>
> > +#endif /* CONFIG_HARDENED_ATOMIC */
> > +
> >  #define ATOMIC_LONG_READ_OP(mo, suffix) \
> >  static inline long atomic_long_read##mo##suffix(const atomic_long##suffix##_t *l)\
> >  { \
> > @@ -233,12 +249,14 @@ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
> >  	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
> >  }
> >
> > +#ifdef CONFIG_HARDENED_ATOMIC
>
> removing this guard would work both for HARDENED_ATOMIC and
> !HARDENED_ATOMIC.
>
> Anyway, IMO, we should (and can) generate the _wrap versions of the
> definitions in a more systematic way to cover whole functions.

Absolutely, thanks. I will remove them and re-test this evening.

Besides, I am actually trying to be as generic as possible (I don't know
about arm64, but on arm most of the atomic functions are generated via
macros). Still, I fall down on some specific cases. For instance, we need
to guard atomic_inc_return_wrap in the #ifndef block (it is missing in my
previous patch), since on arm the definition is made via a macro (via
atomic_inc_return_wrap_relaxed, which will define and generate
atomic_inc_return_wrap in include/linux/atomic.h).

Unless I hit a big unexpected problem, I will send an RFC this week
(overflow detection is beginning to work in my first attempt). I would be
grateful if there is a nicer solution to those problems :-)

Thanks!

Colin
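The generation Colin refers to works roughly as sketched below. This is a
simplified reading of the __atomic_op_fence pattern that include/linux/atomic.h
used at the time; the _wrap spelling is assumed to be added by the
HARDENED_ATOMIC series and is not part of mainline:

/*
 * If the architecture only provides atomic_inc_return_wrap_relaxed(),
 * linux/atomic.h itself generates the fully ordered variant:
 */
#ifndef atomic_inc_return_wrap
#define atomic_inc_return_wrap(...) \
	__atomic_op_fence(atomic_inc_return_wrap, __VA_ARGS__)
#endif

/*
 * ...where __atomic_op_fence() brackets the _relaxed op with full barriers:
 *
 * #define __atomic_op_fence(op, args...) \
 * ({ \
 *	typeof(op##_relaxed(args)) __ret; \
 *	smp_mb__before_atomic(); \
 *	__ret = op##_relaxed(args); \
 *	smp_mb__after_atomic(); \
 *	__ret; \
 * })
 *
 * Because atomic_inc_return_wrap is already a macro after this point, an
 * unconditional "#define atomic_inc_return_wrap(v) atomic_inc_return(v)"
 * in asm-generic/atomic-long.h would trigger the "redefined" warning that
 * the extra guard is meant to avoid.
 */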
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 790cb00..6f1dc3e 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -46,6 +46,22 @@ typedef atomic_t atomic_long_wrap_t;
 
 #endif
 
+#ifndef CONFIG_HARDENED_ATOMIC
+#define atomic_read_wrap(v) atomic_read(v)
+#define atomic_set_wrap(v, i) atomic_set((v), (i))
+#define atomic_add_wrap(i, v) atomic_add((i), (v))
+#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
+#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
+#define atomic_inc_wrap(v) atomic_inc(v)
+#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
+#define atomic_inc_return_wrap(v) atomic_inc_return(v)
+#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
+#define atomic_dec_wrap(v) atomic_dec(v)
+#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
+#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
+#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test(i, v)
+#endif /* CONFIG_HARDENED_ATOMIC */
+
 #define ATOMIC_LONG_READ_OP(mo, suffix) \
 static inline long atomic_long_read##mo##suffix(const atomic_long##suffix##_t *l)\
 { \
@@ -233,12 +249,14 @@ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
 }
 
+#ifdef CONFIG_HARDENED_ATOMIC
 static inline int atomic_long_sub_and_test_wrap(long i, atomic_long_wrap_t *l)
 {
 	ATOMIC_LONG_PFX(_wrap_t) *v = (ATOMIC_LONG_PFX(_wrap_t) *)l;
 
 	return ATOMIC_LONG_PFX(_sub_and_test_wrap)(i, v);
 }
+#endif
 
 static inline int atomic_long_dec_and_test(atomic_long_t *l)
 {
@@ -291,29 +309,4 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 #define atomic_long_inc_not_zero(l) \
 	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
-#ifndef CONFIG_HARDENED_ATOMIC
-#define atomic_read_wrap(v) atomic_read(v)
-#define atomic_set_wrap(v, i) atomic_set((v), (i))
-#define atomic_add_wrap(i, v) atomic_add((i), (v))
-#define atomic_add_unless_wrap(v, i, j) atomic_add_unless((v), (i), (j))
-#define atomic_sub_wrap(i, v) atomic_sub((i), (v))
-#define atomic_inc_wrap(v) atomic_inc(v)
-#define atomic_inc_and_test_wrap(v) atomic_inc_and_test(v)
-#define atomic_inc_return_wrap(v) atomic_inc_return(v)
-#define atomic_add_return_wrap(i, v) atomic_add_return((i), (v))
-#define atomic_dec_wrap(v) atomic_dec(v)
-#define atomic_cmpxchg_wrap(v, o, n) atomic_cmpxchg((v), (o), (n))
-#define atomic_xchg_wrap(v, i) atomic_xchg((v), (i))
-#define atomic_long_read_wrap(v) atomic_long_read(v)
-#define atomic_long_set_wrap(v, i) atomic_long_set((v), (i))
-#define atomic_long_add_wrap(i, v) atomic_long_add((i), (v))
-#define atomic_long_sub_wrap(i, v) atomic_long_sub((i), (v))
-#define atomic_long_inc_wrap(v) atomic_long_inc(v)
-#define atomic_long_add_return_wrap(i, v) atomic_long_add_return((i), (v))
-#define atomic_long_inc_return_wrap(v) atomic_long_inc_return(v)
-#define atomic_long_sub_and_test_wrap(i, v) atomic_long_sub_and_test((i), (v))
-#define atomic_long_dec_wrap(v) atomic_long_dec(v)
-#define atomic_long_xchg_wrap(v, i) atomic_long_xchg((v), (i))
-#endif /* CONFIG_HARDENED_ATOMIC */
-
 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */