From patchwork Mon Nov  4 14:27:43 2019
X-Patchwork-Submitter: Marco Elver
X-Patchwork-Id: 11225933
Date: Mon,  4 Nov 2019 15:27:43 +0100
In-Reply-To: <20191104142745.14722-1-elver@google.com>
Message-Id: <20191104142745.14722-8-elver@google.com>
References: <20191104142745.14722-1-elver@google.com>
X-Mailer: git-send-email 2.24.0.rc1.363.gb1bccd3e3d-goog
Subject: [PATCH v3 7/9] asm-generic, kcsan: Add KCSAN instrumentation for bitops
From: Marco Elver
To: elver@google.com
Cc: akiyks@gmail.com, stern@rowland.harvard.edu, glider@google.com,
    parri.andrea@gmail.com, andreyknvl@google.com, luto@kernel.org,
    ard.biesheuvel@linaro.org, arnd@arndb.de, boqun.feng@gmail.com,
    bp@alien8.de, dja@axtens.net, dlustig@nvidia.com,
    dave.hansen@linux.intel.com, dhowells@redhat.com, dvyukov@google.com,
    hpa@zytor.com, mingo@redhat.com, j.alglave@ucl.ac.uk,
    joel@joelfernandes.org, corbet@lwn.net, jpoimboe@redhat.com,
    luc.maranget@inria.fr, mark.rutland@arm.com, npiggin@gmail.com,
    paulmck@kernel.org,
    peterz@infradead.org, tglx@linutronix.de, will@kernel.org,
    kasan-dev@googlegroups.com, linux-arch@vger.kernel.org,
    linux-doc@vger.kernel.org, linux-efi@vger.kernel.org,
    linux-kbuild@vger.kernel.org, linux-kernel@vger.kernel.org,
    linux-mm@kvack.org, x86@kernel.org
X-Mailing-List: linux-kbuild@vger.kernel.org

Add explicit KCSAN checks for bitops.

Signed-off-by: Marco Elver <elver@google.com>
---
v2:
* Use kcsan_check{,_atomic}_{read,write} instead of
  kcsan_check_{access,atomic}.
---
 include/asm-generic/bitops-instrumented.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
index ddd1c6d9d8db..864d707cdb87 100644
--- a/include/asm-generic/bitops-instrumented.h
+++ b/include/asm-generic/bitops-instrumented.h
@@ -12,6 +12,7 @@
 #define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
 
 #include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -26,6 +27,7 @@
 static inline void set_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_set_bit(nr, addr);
 }
 
@@ -41,6 +43,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
 static inline void __set_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___set_bit(nr, addr);
 }
 
@@ -54,6 +57,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
 static inline void clear_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit(nr, addr);
 }
 
@@ -69,6 +73,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
 static inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit(nr, addr);
 }
 
@@ -82,6 +87,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit_unlock(nr, addr);
 }
 
@@ -97,6 +103,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit_unlock(nr, addr);
 }
 
@@ -113,6 +120,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 static inline void change_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_change_bit(nr, addr);
 }
 
@@ -128,6 +136,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
 static inline void __change_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___change_bit(nr, addr);
 }
 
@@ -141,6 +150,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_set_bit(nr, addr);
 }
 
@@ -155,6 +165,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch___test_and_set_bit(nr, addr);
 }
 
@@ -170,6 +181,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_set_bit_lock(nr, addr);
 }
 
@@ -183,6 +195,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_clear_bit(nr, addr);
 }
 
@@ -197,6 +210,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch___test_and_clear_bit(nr, addr);
 }
 
@@ -210,6 +224,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_change_bit(nr, addr);
 }
 
@@ -224,6 +239,7 @@ static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch___test_and_change_bit(nr, addr);
 }
 
@@ -235,6 +251,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 static inline bool test_bit(long nr, const volatile unsigned long *addr)
 {
 	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_read(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_bit(nr, addr);
 }
 
@@ -254,6 +271,7 @@ static inline bool
 clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	kcsan_check_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
 }
 /* Let everybody know we have it. */
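
For illustration only, not part of the patch: a minimal sketch of the distinction the added annotations encode. The shared bitmap and both helper functions below are hypothetical. set_bit() is an atomic RMW and is checked with kcsan_check_atomic_write(), whereas __set_bit() is a plain write checked with kcsan_check_write(), so KCSAN may report a data race if it runs concurrently with another access to the same word.

/*
 * Illustrative sketch only -- not part of this patch. The bitmap and
 * the two writer functions are hypothetical.
 */
static unsigned long example_flags;	/* hypothetical shared bitmap */

static void writer_marked(void)
{
	/* Atomic RMW: instrumented via kcsan_check_atomic_write(). */
	set_bit(0, &example_flags);
}

static void writer_plain(void)
{
	/*
	 * Non-atomic variant: instrumented via kcsan_check_write(), so
	 * KCSAN may report a data race if this runs concurrently with
	 * any other access to the same word, unless callers serialize
	 * the accesses externally.
	 */
	__set_bit(1, &example_flags);
}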