diff mbox series

[RFC,v3,26/36] kmsan: use __msan_memcpy() where possible.

Message ID 20191122112621.204798-27-glider@google.com (mailing list archive)
State New, archived
Headers show
Series Add KernelMemorySanitizer infrastructure | expand

Commit Message

Alexander Potapenko Nov. 22, 2019, 11:26 a.m. UTC
Unless stated otherwise (by explicitly calling __memcpy()) we want all
memcpy() calls to call __msan_memcpy() so that shadow and origin values
are updated accordingly.

Bootloader must still use the default string functions to avoid crashes.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: linux-mm@kvack.org
---
v3:
 - use default string functions in the bootloader

Change-Id: Ib2512ce5aa8d457453dd38caa12f58f002166813
---
 arch/x86/boot/compressed/misc.h  | 1 +
 arch/x86/include/asm/string_64.h | 9 ++++++++-
 include/linux/compiler.h         | 9 ++++++++-
 include/linux/string.h           | 2 ++
 4 files changed, 19 insertions(+), 2 deletions(-)

Comments

Andrey Konovalov Nov. 29, 2019, 3:13 p.m. UTC | #1
On Fri, Nov 22, 2019 at 12:27 PM <glider@google.com> wrote:
>
> Unless stated otherwise (by explicitly calling __memcpy()) we want all
> memcpy() calls to call __msan_memcpy() so that shadow and origin values
> are updated accordingly.

Why do we only do this for memcpy() but not for memmove() and others?

>
> Bootloader must still the default string functions to avoid crashes.

must still use

>
> Signed-off-by: Alexander Potapenko <glider@google.com>
> To: Alexander Potapenko <glider@google.com>
> Cc: Vegard Nossum <vegard.nossum@oracle.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: linux-mm@kvack.org
> ---
> v3:
>  - use default string functions in the bootloader
>
> Change-Id: Ib2512ce5aa8d457453dd38caa12f58f002166813
> ---
>  arch/x86/boot/compressed/misc.h  | 1 +
>  arch/x86/include/asm/string_64.h | 9 ++++++++-
>  include/linux/compiler.h         | 9 ++++++++-
>  include/linux/string.h           | 2 ++
>  4 files changed, 19 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
> index c8181392f70d..dd4bd8c5d97a 100644
> --- a/arch/x86/boot/compressed/misc.h
> +++ b/arch/x86/boot/compressed/misc.h
> @@ -12,6 +12,7 @@
>  #undef CONFIG_PARAVIRT_XXL
>  #undef CONFIG_PARAVIRT_SPINLOCKS
>  #undef CONFIG_KASAN
> +#undef CONFIG_KMSAN
>
>  /* cpu_feature_enabled() cannot be used this early */
>  #define USE_EARLY_PGTABLE_L5
> diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
> index 75314c3dbe47..d3c76d910c23 100644
> --- a/arch/x86/include/asm/string_64.h
> +++ b/arch/x86/include/asm/string_64.h
> @@ -11,7 +11,13 @@
>     function. */
>
>  #define __HAVE_ARCH_MEMCPY 1
> +#if defined(CONFIG_KMSAN)
> +#undef memcpy
> +/* __msan_memcpy() is defined in compiler.h */
> +#define memcpy(dst, src, len) __msan_memcpy(dst, src, len)
> +#else
>  extern void *memcpy(void *to, const void *from, size_t len);
> +#endif
>  extern void *__memcpy(void *to, const void *from, size_t len);
>
>  #define __HAVE_ARCH_MEMSET
> @@ -64,7 +70,8 @@ char *strcpy(char *dest, const char *src);
>  char *strcat(char *dest, const char *src);
>  int strcmp(const char *cs, const char *ct);
>
> -#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
> +#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) || \
> +       (defined(CONFIG_KMSAN) && !defined(__SANITIZE_MEMORY__))
>
>  /*
>   * For files that not instrumented (e.g. mm/slub.c) we
> diff --git a/include/linux/compiler.h b/include/linux/compiler.h
> index 99d40f31a2c3..9ce11f4f4cb2 100644
> --- a/include/linux/compiler.h
> +++ b/include/linux/compiler.h
> @@ -179,6 +179,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
>
>  #include <uapi/linux/types.h>
>
> +#ifdef CONFIG_KMSAN
> +void *__msan_memcpy(void *dst, const void *src, u64 size);
> +#define __DO_MEMCPY(res, p, size) __msan_memcpy(res, p, size)
> +#else
> +#define __DO_MEMCPY(res, p, size) __builtin_memcpy(res, p, size)
> +#endif
> +
>  #define __READ_ONCE_SIZE                                               \
>  ({                                                                     \
>         switch (size) {                                                 \
> @@ -188,7 +195,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
>         case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
>         default:                                                        \
>                 barrier();                                              \
> -               __builtin_memcpy((void *)res, (const void *)p, size);   \
> +               __DO_MEMCPY((void *)res, (const void *)p, size);        \
>                 barrier();                                              \
>         }                                                               \
>  })
> diff --git a/include/linux/string.h b/include/linux/string.h
> index b6ccdc2c7f02..5d8ce09cba2e 100644
> --- a/include/linux/string.h
> +++ b/include/linux/string.h
> @@ -363,6 +363,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
>         return __builtin_memset(p, c, size);
>  }
>
> +#ifndef CONFIG_KMSAN
>  __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
>  {
>         size_t p_size = __builtin_object_size(p, 0);
> @@ -377,6 +378,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
>                 fortify_panic(__func__);
>         return __builtin_memcpy(p, q, size);
>  }
> +#endif
>
>  __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
>  {
> --
> 2.24.0.432.g9d3f5f5b63-goog
>
Alexander Potapenko Dec. 5, 2019, 3:46 p.m. UTC | #2
On Fri, Nov 29, 2019 at 4:13 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> On Fri, Nov 22, 2019 at 12:27 PM <glider@google.com> wrote:
> >
> > Unless stated otherwise (by explicitly calling __memcpy()) we want all
> > memcpy() calls to call __msan_memcpy() so that shadow and origin values
> > are updated accordingly.
>
> Why do we only do this for memcpy() but not for memove() and others?
Hm, interesting.
Looks like I simply forgot to add memset() and memmove(). Could have
cost us some false negatives.
> >
> > Bootloader must still the default string functions to avoid crashes.
>
> must still use
Ack
>
> >
> > Signed-off-by: Alexander Potapenko <glider@google.com>
> > To: Alexander Potapenko <glider@google.com>
> > Cc: Vegard Nossum <vegard.nossum@oracle.com>
> > Cc: Dmitry Vyukov <dvyukov@google.com>
> > Cc: linux-mm@kvack.org
> > ---
> > v3:
> >  - use default string functions in the bootloader
> >
> > Change-Id: Ib2512ce5aa8d457453dd38caa12f58f002166813
> > ---
> >  arch/x86/boot/compressed/misc.h  | 1 +
> >  arch/x86/include/asm/string_64.h | 9 ++++++++-
> >  include/linux/compiler.h         | 9 ++++++++-
> >  include/linux/string.h           | 2 ++
> >  4 files changed, 19 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
> > index c8181392f70d..dd4bd8c5d97a 100644
> > --- a/arch/x86/boot/compressed/misc.h
> > +++ b/arch/x86/boot/compressed/misc.h
> > @@ -12,6 +12,7 @@
> >  #undef CONFIG_PARAVIRT_XXL
> >  #undef CONFIG_PARAVIRT_SPINLOCKS
> >  #undef CONFIG_KASAN
> > +#undef CONFIG_KMSAN
> >
> >  /* cpu_feature_enabled() cannot be used this early */
> >  #define USE_EARLY_PGTABLE_L5
> > diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
> > index 75314c3dbe47..d3c76d910c23 100644
> > --- a/arch/x86/include/asm/string_64.h
> > +++ b/arch/x86/include/asm/string_64.h
> > @@ -11,7 +11,13 @@
> >     function. */
> >
> >  #define __HAVE_ARCH_MEMCPY 1
> > +#if defined(CONFIG_KMSAN)
> > +#undef memcpy
> > +/* __msan_memcpy() is defined in compiler.h */
> > +#define memcpy(dst, src, len) __msan_memcpy(dst, src, len)
> > +#else
> >  extern void *memcpy(void *to, const void *from, size_t len);
> > +#endif
> >  extern void *__memcpy(void *to, const void *from, size_t len);
> >
> >  #define __HAVE_ARCH_MEMSET
> > @@ -64,7 +70,8 @@ char *strcpy(char *dest, const char *src);
> >  char *strcat(char *dest, const char *src);
> >  int strcmp(const char *cs, const char *ct);
> >
> > -#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
> > +#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) || \
> > +       (defined(CONFIG_KMSAN) && !defined(__SANITIZE_MEMORY__))
> >
> >  /*
> >   * For files that not instrumented (e.g. mm/slub.c) we
> > diff --git a/include/linux/compiler.h b/include/linux/compiler.h
> > index 99d40f31a2c3..9ce11f4f4cb2 100644
> > --- a/include/linux/compiler.h
> > +++ b/include/linux/compiler.h
> > @@ -179,6 +179,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
> >
> >  #include <uapi/linux/types.h>
> >
> > +#ifdef CONFIG_KMSAN
> > +void *__msan_memcpy(void *dst, const void *src, u64 size);
> > +#define __DO_MEMCPY(res, p, size) __msan_memcpy(res, p, size)
> > +#else
> > +#define __DO_MEMCPY(res, p, size) __builtin_memcpy(res, p, size)
> > +#endif
> > +
> >  #define __READ_ONCE_SIZE                                               \
> >  ({                                                                     \
> >         switch (size) {                                                 \
> > @@ -188,7 +195,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
> >         case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
> >         default:                                                        \
> >                 barrier();                                              \
> > -               __builtin_memcpy((void *)res, (const void *)p, size);   \
> > +               __DO_MEMCPY((void *)res, (const void *)p, size);        \
> >                 barrier();                                              \
> >         }                                                               \
> >  })
> > diff --git a/include/linux/string.h b/include/linux/string.h
> > index b6ccdc2c7f02..5d8ce09cba2e 100644
> > --- a/include/linux/string.h
> > +++ b/include/linux/string.h
> > @@ -363,6 +363,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
> >         return __builtin_memset(p, c, size);
> >  }
> >
> > +#ifndef CONFIG_KMSAN
> >  __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
> >  {
> >         size_t p_size = __builtin_object_size(p, 0);
> > @@ -377,6 +378,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
> >                 fortify_panic(__func__);
> >         return __builtin_memcpy(p, q, size);
> >  }
> > +#endif
> >
> >  __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
> >  {
> > --
> > 2.24.0.432.g9d3f5f5b63-goog
> >
diff mbox series

Patch

diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index c8181392f70d..dd4bd8c5d97a 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -12,6 +12,7 @@ 
 #undef CONFIG_PARAVIRT_XXL
 #undef CONFIG_PARAVIRT_SPINLOCKS
 #undef CONFIG_KASAN
+#undef CONFIG_KMSAN
 
 /* cpu_feature_enabled() cannot be used this early */
 #define USE_EARLY_PGTABLE_L5
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 75314c3dbe47..d3c76d910c23 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -11,7 +11,13 @@ 
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+#if defined(CONFIG_KMSAN)
+#undef memcpy
+/* __msan_memcpy() is defined in compiler.h */
+#define memcpy(dst, src, len) __msan_memcpy(dst, src, len)
+#else
 extern void *memcpy(void *to, const void *from, size_t len);
+#endif
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #define __HAVE_ARCH_MEMSET
@@ -64,7 +70,8 @@  char *strcpy(char *dest, const char *src);
 char *strcat(char *dest, const char *src);
 int strcmp(const char *cs, const char *ct);
 
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) || \
+	(defined(CONFIG_KMSAN) && !defined(__SANITIZE_MEMORY__))
 
 /*
  * For files that not instrumented (e.g. mm/slub.c) we
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 99d40f31a2c3..9ce11f4f4cb2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -179,6 +179,13 @@  void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 #include <uapi/linux/types.h>
 
+#ifdef CONFIG_KMSAN
+void *__msan_memcpy(void *dst, const void *src, u64 size);
+#define __DO_MEMCPY(res, p, size) __msan_memcpy(res, p, size)
+#else
+#define __DO_MEMCPY(res, p, size) __builtin_memcpy(res, p, size)
+#endif
+
 #define __READ_ONCE_SIZE						\
 ({									\
 	switch (size) {							\
@@ -188,7 +195,7 @@  void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
 	default:							\
 		barrier();						\
-		__builtin_memcpy((void *)res, (const void *)p, size);	\
+		__DO_MEMCPY((void *)res, (const void *)p, size);	\
 		barrier();						\
 	}								\
 })
diff --git a/include/linux/string.h b/include/linux/string.h
index b6ccdc2c7f02..5d8ce09cba2e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -363,6 +363,7 @@  __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
 	return __builtin_memset(p, c, size);
 }
 
+#ifndef CONFIG_KMSAN
 __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
 {
 	size_t p_size = __builtin_object_size(p, 0);
@@ -377,6 +378,7 @@  __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
 		fortify_panic(__func__);
 	return __builtin_memcpy(p, q, size);
 }
+#endif
 
 __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
 {