[RFC,v3,22/36] kmsan: mm: call KMSAN hooks from SLUB code

Message ID 20191122112621.204798-23-glider@google.com (mailing list archive)
State New, archived
Series Add KernelMemorySanitizer infrastructure

Commit Message

Alexander Potapenko Nov. 22, 2019, 11:26 a.m. UTC
In order to report uninitialized memory coming from heap allocations,
KMSAN has to poison them unless they're created with __GFP_ZERO.

Conveniently, the KMSAN hooks are needed in exactly the places where
init_on_alloc/init_on_free initialization is performed.
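As a rough illustration (a sketch only, not the hook implementation from
this series; kmsan_poison_shadow() is assumed here by analogy with the
kmsan_unpoison_shadow() calls in the patch), an allocation hook of this
shape marks a new object's shadow as uninitialized unless the allocation
is zeroed:

	/* Sketch: poison a freshly allocated slab object for KMSAN. */
	static void kmsan_slab_alloc_sketch(struct kmem_cache *s, void *object,
					    gfp_t flags)
	{
		if (unlikely(!object))
			return;
		if (flags & __GFP_ZERO)
			/* Zeroed memory is fully initialized: clear its shadow. */
			kmsan_unpoison_shadow(object, s->object_size);
		else
			/* Mark the object uninitialized so reads get reported. */
			kmsan_poison_shadow(object, s->object_size, flags);
	}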

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: linux-mm@kvack.org
---
v3:
 - reverted unrelated whitespace changes

Change-Id: I51103b7981d3aabed747d0c85cbdc85568665871
---
 mm/slub.c | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)

Comments

Marco Elver Dec. 2, 2019, 3:36 p.m. UTC | #1
On Fri, 22 Nov 2019 at 12:27, <glider@google.com> wrote:
> [...]
> +/*
> + * When running under KMSAN, get_freepointer_safe() may return an uninitialized
> + * pointer value if the current thread loses the race for the next memory
> + * chunk in the freelist. In that case this_cpu_cmpxchg_double() in
> + * slab_alloc_node() will fail, so the uninitialized value won't be used, but
> + * KMSAN will still check all arguments of cmpxchg because of its imperfect
> + * handling of inline assembly.
> + * To work around this problem, use KMSAN_INIT_VALUE() to force-initialize the
> + * return value of get_freepointer_safe().
> + */

Isn't this a general problem with cmpxchg? I.e. does other code using
it have the same problem?

Would it be better to just use KMSAN_INIT_VALUE in cmpxchg, rather
than having the one-off workaround here?

Thanks,
-- Marco
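
For context, KMSAN_INIT_VALUE() is meant to strip the shadow from a value.
A minimal sketch of how such a macro can work, assuming (as KMSAN does for
unannotated asm) that inline-assembly outputs are treated as fully
initialized; the actual definition in <linux/kmsan-checks.h> may differ:

	/*
	 * Sketch: launder @val through an empty asm statement so the
	 * instrumentation loses track of its shadow and considers the
	 * result initialized.
	 */
	#define KMSAN_INIT_VALUE(val)					\
		({							\
			typeof(val) __ret = (val);			\
			__asm__ __volatile__("" : "+r"(__ret));		\
			__ret;						\
		})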
Alexander Potapenko Dec. 10, 2019, 12:07 p.m. UTC | #2
On Mon, Dec 2, 2019 at 4:37 PM Marco Elver <elver@google.com> wrote:
>
> On Fri, 22 Nov 2019 at 12:27, <glider@google.com> wrote:
> > [...]
> > +/*
> > + * When running under KMSAN, get_freepointer_safe() may return an uninitialized
> > + * pointer value if the current thread loses the race for the next memory
> > + * chunk in the freelist. In that case this_cpu_cmpxchg_double() in
> > + * slab_alloc_node() will fail, so the uninitialized value won't be used, but
> > + * KMSAN will still check all arguments of cmpxchg because of its imperfect
> > + * handling of inline assembly.
> > + * To work around this problem, use KMSAN_INIT_VALUE() to force-initialize the
> > + * return value of get_freepointer_safe().
> > + */
>
> Isn't this a general problem with cmpxchg? I.e. does other code using
> it have the same problem?
>
> Would it be better to just use KMSAN_INIT_VALUE in cmpxchg, rather
> than having the one-off workaround here?
I don't think so. It's normally still an error to pass an uninitialized
arg to cmpxchg: in other cases the uninitialized value may be copied into
the result.
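
A hypothetical example of the case Alexander describes (sketch only): if
cmpxchg() blanket-initialized its arguments, an uninitialized 'new' value
that wins the race would be silently stored, and the later read would go
unreported:

	long val = 0;
	long new_val;	/* uninitialized by mistake */

	/*
	 * If the compare succeeds, the garbage in new_val is written to
	 * val; the read below is a real bug that KMSAN should catch, and
	 * initializing cmpxchg arguments inside the macro would hide it.
	 */
	if (cmpxchg(&val, 0, new_val) == 0)
		pr_info("val = %ld\n", val);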

Patch

diff --git a/mm/slub.c b/mm/slub.c
index b25c807a111f..b5d2be1ac755 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -21,6 +21,8 @@ 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kasan.h>
+#include <linux/kmsan.h>
+#include <linux/kmsan-checks.h> /* KMSAN_INIT_VALUE */
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -285,17 +287,27 @@  static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 	prefetch(object + s->offset);
 }
 
+/*
+ * When running under KMSAN, get_freepointer_safe() may return an uninitialized
+ * pointer value if the current thread loses the race for the next memory
+ * chunk in the freelist. In that case this_cpu_cmpxchg_double() in
+ * slab_alloc_node() will fail, so the uninitialized value won't be used, but
+ * KMSAN will still check all arguments of cmpxchg because of its imperfect
+ * handling of inline assembly.
+ * To work around this problem, use KMSAN_INIT_VALUE() to force-initialize the
+ * return value of get_freepointer_safe().
+ */
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
 	unsigned long freepointer_addr;
 	void *p;
 
 	if (!debug_pagealloc_enabled())
-		return get_freepointer(s, object);
+		return KMSAN_INIT_VALUE(get_freepointer(s, object));
 
 	freepointer_addr = (unsigned long)object + s->offset;
 	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
-	return freelist_ptr(s, p, freepointer_addr);
+	return KMSAN_INIT_VALUE(freelist_ptr(s, p, freepointer_addr));
 }
 
 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
@@ -1390,6 +1402,7 @@  static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 	ptr = kasan_kmalloc_large(ptr, size, flags);
 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ptr, size, 1, flags);
+	kmsan_kmalloc_large(ptr, size, flags);
 	return ptr;
 }
 
@@ -1397,6 +1410,7 @@  static __always_inline void kfree_hook(void *x)
 {
 	kmemleak_free(x);
 	kasan_kfree_large(x, _RET_IP_);
+	kmsan_kfree_large(x);
 }
 
 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
@@ -1453,6 +1467,12 @@  static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 		} while (object != old_tail);
 	}
 
+	do {
+		object = next;
+		next = get_freepointer(s, object);
+		kmsan_slab_free(s, object);
+	} while (object != old_tail);
+
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
  * evaluates to nothing.  Thus, catch all relevant config debug options here.
@@ -2776,6 +2796,7 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(object, 0, s->object_size);
 
+	kmsan_slab_alloc(s, object, gfpflags);
 	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
@@ -3157,7 +3178,7 @@  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	struct kmem_cache_cpu *c;
-	int i;
+	int i, j;
 
 	/* memcg and kmem_cache debug support */
 	s = slab_pre_alloc_hook(s, flags);
@@ -3198,11 +3219,11 @@  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	/* Clear memory outside IRQ disabled fastpath loop */
 	if (unlikely(slab_want_init_on_alloc(flags, s))) {
-		int j;
-
 		for (j = 0; j < i; j++)
 			memset(p[j], 0, s->object_size);
 	}
+	for (j = 0; j < i; j++)
+		kmsan_slab_alloc(s, p[j], flags);
 
 	/* memcg and kmem_cache debug support */
 	slab_post_alloc_hook(s, flags, size, p);
@@ -3803,6 +3824,7 @@  static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
+__no_sanitize_memory
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
@@ -5717,6 +5739,7 @@  static char *create_unique_id(struct kmem_cache *s)
 	p += sprintf(p, "%07u", s->size);
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);
+	kmsan_unpoison_shadow(name, p - name);
 	return name;
 }
 
@@ -5866,6 +5889,7 @@  static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 	al->name = name;
 	al->next = alias_list;
 	alias_list = al;
+	kmsan_unpoison_shadow(al, sizeof(struct saved_alias));
 	return 0;
 }
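
Taken together, these hooks make the classic read-before-write bug on slab
memory detectable. A hypothetical example of the bug class this enables
KMSAN to report (sketch, not code from the series):

	struct foo {
		int a;
		int b;
	};

	int read_before_init(void)
	{
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
		int ret;

		if (!f)
			return -ENOMEM;
		f->a = 1;
		/* f->b is never written: with kmsan_slab_alloc() poisoning
		 * the object, KMSAN reports this read of uninitialized
		 * slab memory.
		 */
		ret = f->b;
		kfree(f);
		return ret;
	}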