
[2/2] kmsan: prevent optimizations in memcpy tests

Message ID 20230907130642.245222-2-glider@google.com (mailing list archive)
State New
Series [1/2] kmsan: simplify kmsan_internal_memmove_metadata()

Commit Message

Alexander Potapenko Sept. 7, 2023, 1:06 p.m. UTC
Clang 18 learned to optimize away memcpy() calls that copy small uninitialized
scalar values. To ensure that the memcpy tests in kmsan_test.c still perform
calls to memcpy() (which KMSAN replaces with __msan_memcpy()), declare a
separate memcpy_noinline() function with volatile parameters, whose calls the
compiler will not optimize away.

Also retire DO_NOT_OPTIMIZE(), as memcpy_noinline() is apparently
enough.

Signed-off-by: Alexander Potapenko <glider@google.com>
---
 mm/kmsan/kmsan_test.c | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)
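
For illustration only (editorial sketch, not part of the patch): the pre-patch
tests relied on DO_NOT_OPTIMIZE() around a plain memcpy(), as in the removed
lines of the diff below. Because the explicit casts strip the volatile
qualifier, Clang 18 can prove the source value is never written and elide the
copy, so the __msan_memcpy() hook that KMSAN inserts is never reached:

	/* Pre-patch pattern (see the '-' lines in the diff below). */
	volatile int uninit_src;
	volatile int dst = 0;

	DO_NOT_OPTIMIZE(uninit_src);	/* expands to barrier() */
	memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
	kmsan_check_memory((void *)&dst, sizeof(dst));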

Comments

kernel test robot Sept. 10, 2023, 12:31 a.m. UTC | #1
Hi Alexander,

kernel test robot noticed the following build errors:

[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master v6.5 next-20230908]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Alexander-Potapenko/kmsan-prevent-optimizations-in-memcpy-tests/20230907-210817
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20230907130642.245222-2-glider%40google.com
patch subject: [PATCH 2/2] kmsan: prevent optimizations in memcpy tests
config: x86_64-buildonly-randconfig-006-20230910 (https://download.01.org/0day-ci/archive/20230910/202309100805.cRHktAYd-lkp@intel.com/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230910/202309100805.cRHktAYd-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309100805.cRHktAYd-lkp@intel.com/

All errors (new ones prefixed by >>):

>> mm/kmsan/kmsan_test.c:414:16: error: passing 'volatile void *' to parameter of type 'void *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
           return memcpy(dst, src, size);
                         ^~~
   arch/x86/include/asm/string_64.h:18:27: note: passing argument to parameter 'to' here
   extern void *memcpy(void *to, const void *from, size_t len);
                             ^
>> mm/kmsan/kmsan_test.c:414:21: error: passing 'const volatile void *' to parameter of type 'const void *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
           return memcpy(dst, src, size);
                              ^~~
   arch/x86/include/asm/string_64.h:18:43: note: passing argument to parameter 'from' here
   extern void *memcpy(void *to, const void *from, size_t len);
                                             ^
>> mm/kmsan/kmsan_test.c:468:21: error: passing 'volatile int *' to parameter of type 'const void *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
           kmsan_check_memory(&uninit_src, sizeof(uninit_src));
                              ^~~~~~~~~~~
   include/linux/kmsan-checks.h:47:37: note: passing argument to parameter 'address' here
   void kmsan_check_memory(const void *address, size_t size);
                                       ^
   3 errors generated.


vim +414 mm/kmsan/kmsan_test.c

   409	
   410	/* Prevent the compiler from inlining a memcpy() call. */
   411	static noinline void *memcpy_noinline(volatile void *dst,
   412					      const volatile void *src, size_t size)
   413	{
 > 414		return memcpy(dst, src, size);
   415	}
   416	
   417	/* Test case: ensure that memcpy() correctly copies initialized values. */
   418	static void test_init_memcpy(struct kunit *test)
   419	{
   420		EXPECTATION_NO_REPORT(expect);
   421		volatile int src;
   422		volatile int dst = 0;
   423	
   424		src = 1;
   425		kunit_info(
   426			test,
   427			"memcpy()ing aligned initialized src to aligned dst (no reports)\n");
   428		memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
   429		kmsan_check_memory((void *)&dst, sizeof(dst));
   430		KUNIT_EXPECT_TRUE(test, report_matches(&expect));
   431	}
   432	
   433	/*
   434	 * Test case: ensure that memcpy() correctly copies uninitialized values between
   435	 * aligned `src` and `dst`.
   436	 */
   437	static void test_memcpy_aligned_to_aligned(struct kunit *test)
   438	{
   439		EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
   440		volatile int uninit_src;
   441		volatile int dst = 0;
   442	
   443		kunit_info(
   444			test,
   445			"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
   446		memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
   447		kmsan_check_memory((void *)&dst, sizeof(dst));
   448		KUNIT_EXPECT_TRUE(test, report_matches(&expect));
   449	}
   450	
   451	/*
   452	 * Test case: ensure that memcpy() correctly copies uninitialized values between
   453	 * aligned `src` and unaligned `dst`.
   454	 *
   455	 * Copying aligned 4-byte value to an unaligned one leads to touching two
   456	 * aligned 4-byte values. This test case checks that KMSAN correctly reports an
   457	 * error on the first of the two values.
   458	 */
   459	static void test_memcpy_aligned_to_unaligned(struct kunit *test)
   460	{
   461		EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
   462		volatile int uninit_src;
   463		volatile char dst[8] = { 0 };
   464	
   465		kunit_info(
   466			test,
   467			"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
 > 468		kmsan_check_memory(&uninit_src, sizeof(uninit_src));
   469		memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
   470				sizeof(uninit_src));
   471		kmsan_check_memory((void *)dst, 4);
   472		KUNIT_EXPECT_TRUE(test, report_matches(&expect));
   473	}
   474
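
One plausible way to address the qualifier-discard errors above (an editorial
sketch; not necessarily the fix the author posted) is to cast the volatile
qualifiers away explicitly at the call sites, while keeping the wrapper's
volatile prototype so calls through it still cannot be optimized away:

	static noinline void *memcpy_noinline(volatile void *dst,
					      const volatile void *src, size_t size)
	{
		return memcpy((void *)dst, (void *)src, size);
	}

	/* ...and in test_memcpy_aligned_to_unaligned(): */
	kmsan_check_memory((void *)&uninit_src, sizeof(uninit_src));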

Patch

diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 312989aa2865c..0c32c917b489a 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -407,33 +407,25 @@  static void test_printk(struct kunit *test)
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
-/*
- * Prevent the compiler from optimizing @var away. Without this, Clang may
- * notice that @var is uninitialized and drop memcpy() calls that use it.
- *
- * There is OPTIMIZER_HIDE_VAR() in linux/compier.h that we cannot use here,
- * because it is implemented as inline assembly receiving @var as a parameter
- * and will enforce a KMSAN check. Same is true for e.g. barrier_data(var).
- */
-#define DO_NOT_OPTIMIZE(var) barrier()
+/* Prevent the compiler from inlining a memcpy() call. */
+static noinline void *memcpy_noinline(volatile void *dst,
+				      const volatile void *src, size_t size)
+{
+	return memcpy(dst, src, size);
+}
 
-/*
- * Test case: ensure that memcpy() correctly copies initialized values.
- * Also serves as a regression test to ensure DO_NOT_OPTIMIZE() does not cause
- * extra checks.
- */
+/* Test case: ensure that memcpy() correctly copies initialized values. */
 static void test_init_memcpy(struct kunit *test)
 {
 	EXPECTATION_NO_REPORT(expect);
 	volatile int src;
 	volatile int dst = 0;
 
-	DO_NOT_OPTIMIZE(src);
 	src = 1;
 	kunit_info(
 		test,
 		"memcpy()ing aligned initialized src to aligned dst (no reports)\n");
-	memcpy((void *)&dst, (void *)&src, sizeof(src));
+	memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
 	kmsan_check_memory((void *)&dst, sizeof(dst));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -451,8 +443,7 @@  static void test_memcpy_aligned_to_aligned(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
-	DO_NOT_OPTIMIZE(uninit_src);
-	memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
+	memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)&dst, sizeof(dst));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -474,8 +465,9 @@  static void test_memcpy_aligned_to_unaligned(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
-	DO_NOT_OPTIMIZE(uninit_src);
-	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+	kmsan_check_memory(&uninit_src, sizeof(uninit_src));
+	memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
+			sizeof(uninit_src));
 	kmsan_check_memory((void *)dst, 4);
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -498,8 +490,8 @@  static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
-	DO_NOT_OPTIMIZE(uninit_src);
-	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+	memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
+			sizeof(uninit_src));
 	kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -513,7 +505,6 @@  static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
                                                                             \
 		kunit_info(test,                                            \
 			   "memset" #size "() should initialize memory\n"); \
-		DO_NOT_OPTIMIZE(uninit);                                    \
 		memset##size((uint##size##_t *)&uninit, 0, 1);              \
 		kmsan_check_memory((void *)&uninit, sizeof(uninit));        \
 		KUNIT_EXPECT_TRUE(test, report_matches(&expect));           \