Message ID | 20211005105905.1994700-6-elver@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | kcsan: Support detecting a subset of missing memory barriers | expand |
On Tue, Oct 05, 2021 at 12:58:47PM +0200, Marco Elver wrote: > +static __always_inline void kcsan_atomic_release(int memorder) > +{ > + if (memorder == __ATOMIC_RELEASE || > + memorder == __ATOMIC_SEQ_CST || > + memorder == __ATOMIC_ACQ_REL) > + __kcsan_release(); > +} > + > #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \ > u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \ > u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \ > { \ > + kcsan_atomic_release(memorder); \ > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \ > } \ > @@ -1156,6 +1187,7 @@ EXPORT_SYMBOL(__tsan_init); > void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \ > void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \ > { \ > + kcsan_atomic_release(memorder); \ > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > check_access(ptr, bits / BITS_PER_BYTE, \ > KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \ > @@ -1168,6 +1200,7 @@ EXPORT_SYMBOL(__tsan_init); > u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \ > u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \ > { \ > + kcsan_atomic_release(memorder); \ > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > check_access(ptr, bits / BITS_PER_BYTE, \ > KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ > @@ -1200,6 +1233,7 @@ EXPORT_SYMBOL(__tsan_init); > int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \ > u##bits val, int mo, int fail_mo) \ > { \ > + kcsan_atomic_release(mo); \ > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > check_access(ptr, bits / BITS_PER_BYTE, \ > KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ > @@ -1215,6 +1249,7 @@ EXPORT_SYMBOL(__tsan_init); > u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \ > int mo, int fail_mo) \ > { \ > + 
kcsan_atomic_release(mo); \ > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > check_access(ptr, bits / BITS_PER_BYTE, \ > KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ > @@ -1246,6 +1281,7 @@ DEFINE_TSAN_ATOMIC_OPS(64); > void __tsan_atomic_thread_fence(int memorder); > void __tsan_atomic_thread_fence(int memorder) > { > + kcsan_atomic_release(memorder); > __atomic_thread_fence(memorder); > } > EXPORT_SYMBOL(__tsan_atomic_thread_fence); I find that very hard to read... kcsan_atomic_release() is not in fact a release. It might be a release if @memorder implies one.
On Tue, Oct 05, 2021 at 01:41:18PM +0200, Peter Zijlstra wrote: > On Tue, Oct 05, 2021 at 12:58:47PM +0200, Marco Elver wrote: > > +static __always_inline void kcsan_atomic_release(int memorder) > > +{ > > + if (memorder == __ATOMIC_RELEASE || > > + memorder == __ATOMIC_SEQ_CST || > > + memorder == __ATOMIC_ACQ_REL) > > + __kcsan_release(); > > +} > > + > > #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \ > > u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \ > > u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \ > > { \ > > + kcsan_atomic_release(memorder); \ > > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > > check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \ > > } \ > > @@ -1156,6 +1187,7 @@ EXPORT_SYMBOL(__tsan_init); > > void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \ > > void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \ > > { \ > > + kcsan_atomic_release(memorder); \ > > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > > check_access(ptr, bits / BITS_PER_BYTE, \ > > KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \ > > @@ -1168,6 +1200,7 @@ EXPORT_SYMBOL(__tsan_init); > > u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \ > > u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \ > > { \ > > + kcsan_atomic_release(memorder); \ > > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > > check_access(ptr, bits / BITS_PER_BYTE, \ > > KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ > > @@ -1200,6 +1233,7 @@ EXPORT_SYMBOL(__tsan_init); > > int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \ > > u##bits val, int mo, int fail_mo) \ > > { \ > > + kcsan_atomic_release(mo); \ > > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > > check_access(ptr, bits / BITS_PER_BYTE, \ > > KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ > > @@ -1215,6 +1249,7 @@ 
EXPORT_SYMBOL(__tsan_init); > > u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \ > > int mo, int fail_mo) \ > > { \ > > + kcsan_atomic_release(mo); \ > > if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ > > check_access(ptr, bits / BITS_PER_BYTE, \ > > KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ > > @@ -1246,6 +1281,7 @@ DEFINE_TSAN_ATOMIC_OPS(64); > > void __tsan_atomic_thread_fence(int memorder); > > void __tsan_atomic_thread_fence(int memorder) > > { > > + kcsan_atomic_release(memorder); > > __atomic_thread_fence(memorder); > > } > > EXPORT_SYMBOL(__tsan_atomic_thread_fence); > > I find that very hard to read.. kcsan_atomic_release() it not in fact a > release. It might be a release if @memorder implies one. Also, what's the atomic part signify? Is that because you're modeling the difference in acquire/release semantics between smp_load_{acquire,release}() and atomic*_{acquire,release}() ?
On Tue, 5 Oct 2021 at 13:45, Peter Zijlstra <peterz@infradead.org> wrote: > On Tue, Oct 05, 2021 at 01:41:18PM +0200, Peter Zijlstra wrote: > > On Tue, Oct 05, 2021 at 12:58:47PM +0200, Marco Elver wrote: > > > +static __always_inline void kcsan_atomic_release(int memorder) > > > +{ > > > + if (memorder == __ATOMIC_RELEASE || > > > + memorder == __ATOMIC_SEQ_CST || > > > + memorder == __ATOMIC_ACQ_REL) > > > + __kcsan_release(); > > > +} > > > + [...] > > > + kcsan_atomic_release(memorder); > > > __atomic_thread_fence(memorder); > > > } > > > EXPORT_SYMBOL(__tsan_atomic_thread_fence); > > > > I find that very hard to read.. kcsan_atomic_release() it not in fact a > > release. It might be a release if @memorder implies one. You're right, this name can be improved. `kcsan_atomic_builtin_memorder(..)` is probably better > Also, what's the atomic part signify? Is that because you're modeling > the difference in acquire/release semantics between > smp_load_{acquire,release}() and atomic*_{acquire,release}() ? Sorry, just a bad name. It's about the builtins. The above suggested name should hopefully be clearer.
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index a1c6a89fde71..c9e7c39a7d7b 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -36,6 +36,26 @@ */ void __kcsan_check_access(const volatile void *ptr, size_t size, int type); +/** + * __kcsan_mb - full memory barrier instrumentation + */ +void __kcsan_mb(void); + +/** + * __kcsan_wmb - write memory barrier instrumentation + */ +void __kcsan_wmb(void); + +/** + * __kcsan_rmb - read memory barrier instrumentation + */ +void __kcsan_rmb(void); + +/** + * __kcsan_release - release barrier instrumentation + */ +void __kcsan_release(void); + /** * kcsan_disable_current - disable KCSAN for the current context * @@ -159,6 +179,10 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa); static inline void __kcsan_check_access(const volatile void *ptr, size_t size, int type) { } +static inline void __kcsan_mb(void) { } +static inline void __kcsan_wmb(void) { } +static inline void __kcsan_rmb(void) { } +static inline void __kcsan_release(void) { } static inline void kcsan_disable_current(void) { } static inline void kcsan_enable_current(void) { } static inline void kcsan_enable_current_nowarn(void) { } @@ -191,12 +215,25 @@ static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } */ #define __kcsan_disable_current kcsan_disable_current #define __kcsan_enable_current kcsan_enable_current_nowarn -#else +#else /* __SANITIZE_THREAD__ */ static inline void kcsan_check_access(const volatile void *ptr, size_t size, int type) { } static inline void __kcsan_enable_current(void) { } static inline void __kcsan_disable_current(void) { } -#endif +#endif /* __SANITIZE_THREAD__ */ + +#if defined(CONFIG_KCSAN_WEAK_MEMORY) && \ + (defined(__SANITIZE_THREAD__) || defined(__KCSAN_INSTRUMENT_BARRIERS__)) +#define kcsan_mb __kcsan_mb +#define kcsan_wmb __kcsan_wmb +#define kcsan_rmb __kcsan_rmb +#define kcsan_release __kcsan_release +#else /* 
CONFIG_KCSAN_WEAK_MEMORY && (__SANITIZE_THREAD__ || __KCSAN_INSTRUMENT_BARRIERS__) */ +static inline void kcsan_mb(void) { } +static inline void kcsan_wmb(void) { } +static inline void kcsan_rmb(void) { } +static inline void kcsan_release(void) { } +#endif /* CONFIG_KCSAN_WEAK_MEMORY && (__SANITIZE_THREAD__ || __KCSAN_INSTRUMENT_BARRIERS__) */ /** * __kcsan_check_read - check regular read access for races diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 0e180d1cd53d..47c95ccff19f 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -955,6 +955,28 @@ void __kcsan_check_access(const volatile void *ptr, size_t size, int type) } EXPORT_SYMBOL(__kcsan_check_access); +#define DEFINE_MEMORY_BARRIER(name, order_before_cond) \ + kcsan_noinstr void __kcsan_##name(void) \ + { \ + struct kcsan_scoped_access *sa; \ + if (within_noinstr(_RET_IP_)) \ + return; \ + instrumentation_begin(); \ + sa = get_reorder_access(get_ctx()); \ + if (!sa) \ + goto out; \ + if (order_before_cond) \ + sa->size = 0; \ + out: \ + instrumentation_end(); \ + } \ + EXPORT_SYMBOL(__kcsan_##name) + +DEFINE_MEMORY_BARRIER(mb, true); +DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)); +DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND)); +DEFINE_MEMORY_BARRIER(release, true); + /* * KCSAN uses the same instrumentation that is emitted by supported compilers * for ThreadSanitizer (TSAN). @@ -1143,10 +1165,19 @@ EXPORT_SYMBOL(__tsan_init); * functions, whose job is to also execute the operation itself. 
*/ +static __always_inline void kcsan_atomic_release(int memorder) +{ + if (memorder == __ATOMIC_RELEASE || + memorder == __ATOMIC_SEQ_CST || + memorder == __ATOMIC_ACQ_REL) + __kcsan_release(); +} + #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \ u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \ u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \ { \ + kcsan_atomic_release(memorder); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ @@ -1156,6 +1187,7 @@ EXPORT_SYMBOL(__tsan_init); void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \ void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \ { \ + kcsan_atomic_release(memorder); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \ @@ -1168,6 +1200,7 @@ EXPORT_SYMBOL(__tsan_init); u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \ u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \ { \ + kcsan_atomic_release(memorder); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ @@ -1200,6 +1233,7 @@ EXPORT_SYMBOL(__tsan_init); int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \ u##bits val, int mo, int fail_mo) \ { \ + kcsan_atomic_release(mo); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ @@ -1215,6 +1249,7 @@ EXPORT_SYMBOL(__tsan_init); u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \ int mo, int fail_mo) \ { \ + kcsan_atomic_release(mo); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | 
KCSAN_ACCESS_WRITE | \ @@ -1246,6 +1281,7 @@ DEFINE_TSAN_ATOMIC_OPS(64); void __tsan_atomic_thread_fence(int memorder); void __tsan_atomic_thread_fence(int memorder) { + kcsan_atomic_release(memorder); __atomic_thread_fence(memorder); } EXPORT_SYMBOL(__tsan_atomic_thread_fence);
Add the core memory barrier instrumentation functions. These invalidate the current in-flight reordered access based on the rules for the respective barrier types and in-flight access type. Signed-off-by: Marco Elver <elver@google.com> --- include/linux/kcsan-checks.h | 41 ++++++++++++++++++++++++++++++++++-- kernel/kcsan/core.c | 36 +++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 2 deletions(-)