Message ID | 20191030142237.249532-18-glider@google.com (mailing list archive)
---|---
State | New, archived
Series | Add KernelMemorySanitizer infrastructure
On Wed, Oct 30, 2019 at 3:23 PM <glider@google.com> wrote:
>
> In order to report uninitialized memory coming from heap allocations,
> KMSAN has to poison them unless they're created with __GFP_ZERO.
>
> Conveniently, we need KMSAN hooks in the same places where
> init_on_alloc/init_on_free initialization is performed.
>
> Signed-off-by: Alexander Potapenko <glider@google.com>
> To: Alexander Potapenko <glider@google.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Vegard Nossum <vegard.nossum@oracle.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: linux-mm@kvack.org
> ---
>
> Change-Id: I51103b7981d3aabed747d0c85cbdc85568665871
> ---
>  mm/slub.c | 37 +++++++++++++++++++++++++++++++------
>  1 file changed, 31 insertions(+), 6 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index b25c807a111f..8b7069812801 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -21,6 +21,8 @@
>  #include <linux/proc_fs.h>
>  #include <linux/seq_file.h>
>  #include <linux/kasan.h>
> +#include <linux/kmsan.h>
> +#include <linux/kmsan-checks.h> /* KMSAN_INIT_VALUE */
>  #include <linux/cpu.h>
>  #include <linux/cpuset.h>
>  #include <linux/mempolicy.h>
> @@ -285,17 +287,27 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
>  	prefetch(object + s->offset);
>  }
>
> +/*
> + * When running under KMSAN, get_freepointer_safe() may return an uninitialized
> + * pointer value in the case the current thread loses the race for the next
> + * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
> + * slab_alloc_node() will fail, so the uninitialized value won't be used, but
> + * KMSAN will still check all arguments of cmpxchg because of imperfect
> + * handling of inline assembly.
> + * To work around this problem, use KMSAN_INIT_VALUE() to force initialize the
> + * return value of get_freepointer_safe().
> + */
>  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
>  {
>  	unsigned long freepointer_addr;
>  	void *p;
>
>  	if (!debug_pagealloc_enabled())
> -		return get_freepointer(s, object);
> +		return KMSAN_INIT_VALUE(get_freepointer(s, object));
>
>  	freepointer_addr = (unsigned long)object + s->offset;
>  	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
> -	return freelist_ptr(s, p, freepointer_addr);
> +	return KMSAN_INIT_VALUE(freelist_ptr(s, p, freepointer_addr));
>  }
>
>  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
> @@ -1390,6 +1402,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
>  	ptr = kasan_kmalloc_large(ptr, size, flags);
>  	/* As ptr might get tagged, call kmemleak hook after KASAN. */
>  	kmemleak_alloc(ptr, size, 1, flags);
> +	kmsan_kmalloc_large(ptr, size, flags);
>  	return ptr;
>  }
>
> @@ -1397,6 +1410,7 @@ static __always_inline void kfree_hook(void *x)
>  {
>  	kmemleak_free(x);
>  	kasan_kfree_large(x, _RET_IP_);
> +	kmsan_kfree_large(x);
>  }
>
>  static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
> @@ -1453,6 +1467,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
>  		} while (object != old_tail);
>  	}
>
> +	do {
> +		object = next;
> +		next = get_freepointer(s, object);
> +		kmsan_slab_free(s, object);
> +	} while (object != old_tail);
> +
>  	/*
>  	 * Compiler cannot detect this function can be removed if slab_free_hook()
>  	 * evaluates to nothing. Thus, catch all relevant config debug options here.
> @@ -2776,6 +2796,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
>  	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
>  		memset(object, 0, s->object_size);
>
> +	kmsan_slab_alloc(s, object, gfpflags);
>  	slab_post_alloc_hook(s, gfpflags, 1, &object);
>
>  	return object;
> @@ -2804,6 +2825,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
>  	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
>  	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
>  	ret = kasan_kmalloc(s, ret, size, gfpflags);
> +

Looks like an unrelated change.

>  	return ret;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_trace);
> @@ -2816,7 +2838,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
>
>  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
>  				    s->object_size, s->size, gfpflags, node);
> -

Same here.

>  	return ret;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_node);
> @@ -2832,6 +2853,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
>  				  size, s->size, gfpflags, node);
>
>  	ret = kasan_kmalloc(s, ret, size, gfpflags);
> +

And here.

>  	return ret;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
> @@ -3157,7 +3179,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>  			  void **p)
>  {
>  	struct kmem_cache_cpu *c;
> -	int i;
> +	int i, j;
>
>  	/* memcg and kmem_cache debug support */
>  	s = slab_pre_alloc_hook(s, flags);
> @@ -3198,11 +3220,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>
>  	/* Clear memory outside IRQ disabled fastpath loop */
>  	if (unlikely(slab_want_init_on_alloc(flags, s))) {
> -		int j;
> -
>  		for (j = 0; j < i; j++)
>  			memset(p[j], 0, s->object_size);
>  	}
> +	for (j = 0; j < i; j++)
> +		kmsan_slab_alloc(s, p[j], flags);
>
>  	/* memcg and kmem_cache debug support */
>  	slab_post_alloc_hook(s, flags, size, p);
> @@ -3803,6 +3825,7 @@ static int __init setup_slub_min_objects(char *str)
>
>  __setup("slub_min_objects=", setup_slub_min_objects);
>
> +__no_sanitize_memory
>  void *__kmalloc(size_t size, gfp_t flags)
>  {
>  	struct kmem_cache *s;
> @@ -5717,6 +5740,7 @@ static char *create_unique_id(struct kmem_cache *s)
>  	p += sprintf(p, "%07u", s->size);
>
>  	BUG_ON(p > name + ID_STR_LENGTH - 1);
> +	kmsan_unpoison_shadow(name, p - name);
>  	return name;
>  }
>
> @@ -5866,6 +5890,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
>  	al->name = name;
>  	al->next = alias_list;
>  	alias_list = al;
> +	kmsan_unpoison_shadow(al, sizeof(struct saved_alias));
>  	return 0;
>  }
>
> --
> 2.24.0.rc0.303.g954a862665-goog
>
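A note for readers following the comment above: KMSAN_INIT_VALUE()'s job is to make KMSAN treat a value as fully initialized even when its shadow says otherwise. Below is a minimal illustrative sketch of how such a macro could be built on kmsan_unpoison_shadow(), which this patch already uses to clear the shadow of an address range; the real definition lives in <linux/kmsan-checks.h> and may well differ.

```c
/*
 * Illustrative sketch only, not the definition from <linux/kmsan-checks.h>:
 * copy @val through a temporary and clear the temporary's shadow, so the
 * value returned from the statement expression is considered initialized.
 */
#define KMSAN_INIT_VALUE(val)					\
({								\
	typeof(val) __tmp = (val);				\
	kmsan_unpoison_shadow(&__tmp, sizeof(__tmp));		\
	__tmp;							\
})
```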
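The hook placement follows the contract stated in the commit message: a freshly allocated object is poisoned (marked uninitialized) unless it was requested with __GFP_ZERO, in which case its contents are legitimately all zeroes. A minimal sketch of that alloc-side logic is below; kmsan_poison_shadow() is a hypothetical counterpart to the kmsan_unpoison_shadow() call that appears in this patch, and the real kmsan_slab_alloc() is certainly more involved.

```c
#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Sketch of the alloc-side contract, not the real kmsan_slab_alloc():
 * __GFP_ZERO memory is known-initialized, anything else starts out
 * poisoned so that a read before the first write gets reported.
 * kmsan_poison_shadow() is a hypothetical primitive here.
 */
static void kmsan_slab_alloc_sketch(struct kmem_cache *s, void *object,
				    gfp_t flags)
{
	if (!object)
		return;
	if (flags & __GFP_ZERO)
		kmsan_unpoison_shadow(object, s->object_size);
	else
		kmsan_poison_shadow(object, s->object_size);
}
```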
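The __no_sanitize_memory annotation added to __kmalloc() asks the compiler to skip KMSAN instrumentation for that one function, much like __no_sanitize_address does for KASAN. A hypothetical example of the effect (the function below is made up for illustration):

```c
/*
 * Hypothetical illustration: inside an uninstrumented function, loads are
 * not shadow-checked, so even a potentially uninitialized value can pass
 * through without a report being generated at this point.
 */
__no_sanitize_memory
static unsigned long peek_word(const unsigned long *p)
{
	return *p;	/* no KMSAN check is emitted for this load */
}
```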
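The two kmsan_unpoison_shadow() calls at the tail of the patch, in create_unique_id() and sysfs_slab_alias(), both follow the same pattern: once a buffer or structure is known to be fully written, its shadow is cleared explicitly so that later reads of it are not flagged. A small usage sketch of that pattern (the buffer, format string, and helper name are illustrative only):

```c
#include <linux/kernel.h>

/*
 * Illustrative only: mirror the create_unique_id() pattern of building a
 * string and then telling KMSAN that the bytes just written are initialized.
 */
static void fill_id_and_unpoison(char *buf, size_t len, unsigned int size)
{
	int n = snprintf(buf, len, "id-%07u", size);

	if (n > 0)
		kmsan_unpoison_shadow(buf, min_t(size_t, n, len));
}
```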