Message ID | 88fc85e2a8cca03f2bfcae76100d1a3d54eac840.1702514411.git.andreyknvl@google.com |
---|---|
State | New |
Series | lib/stackdepot, kasan: fixes for stack eviction series |
On Thu, 14 Dec 2023 at 01:48, <andrey.konovalov@linux.dev> wrote:
>
> From: Andrey Konovalov <andreyknvl@google.com>
>
> kasan_record_aux_stack can be called concurrently on the same object.
> This might lead to a race condition when rotating the saved aux stack
> trace handles, which in turn leads to incorrect accounting of stack
> depot handles and refcount underflows in the stack depot code.
>
> Fix by introducing a spinlock to protect the aux stack trace handles
> in kasan_record_aux_stack.
>
> Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
> Reported-by: syzbot+186b55175d8360728234@syzkaller.appspotmail.com
> Closes: https://lore.kernel.org/all/000000000000784b1c060b0074a2@google.com/
> Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode")
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>
> ---
>
> Changes v1->v2:
> - Use per-object spinlock instead of a global one.
> ---
>  mm/kasan/generic.c | 32 +++++++++++++++++++++++++++++---
>  mm/kasan/kasan.h   |  2 ++
>  2 files changed, 31 insertions(+), 3 deletions(-)
>
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 54e20b2bc3e1..b9d41d6c70fd 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -25,6 +25,7 @@
>  #include <linux/sched.h>
>  #include <linux/sched/task_stack.h>
>  #include <linux/slab.h>
> +#include <linux/spinlock.h>
>  #include <linux/stackdepot.h>
>  #include <linux/stacktrace.h>
>  #include <linux/string.h>
> @@ -471,8 +472,18 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
>          struct kasan_free_meta *free_meta;
>
>          alloc_meta = kasan_get_alloc_meta(cache, object);
> -        if (alloc_meta)
> +        if (alloc_meta) {
>                  __memset(alloc_meta, 0, sizeof(*alloc_meta));
> +
> +                /*
> +                 * Temporarily disable KASAN bug reporting to allow instrumented
> +                 * spin_lock_init to access aux_lock, which resides inside of a
> +                 * redzone.
> +                 */
> +                kasan_disable_current();
> +                spin_lock_init(&alloc_meta->aux_lock);
> +                kasan_enable_current();
> +        }
>          free_meta = kasan_get_free_meta(cache, object);
>          if (free_meta)
>                  __memset(free_meta, 0, sizeof(*free_meta));
> @@ -502,6 +513,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
>          struct kmem_cache *cache;
>          struct kasan_alloc_meta *alloc_meta;
>          void *object;
> +        depot_stack_handle_t new_handle, old_handle;
> +        unsigned long flags;
>
>          if (is_kfence_address(addr) || !slab)
>                  return;
> @@ -512,9 +525,22 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
>          if (!alloc_meta)
>                  return;
>
> -        stack_depot_put(alloc_meta->aux_stack[1]);
> +        new_handle = kasan_save_stack(0, depot_flags);
> +
> +        /*
> +         * Temporarily disable KASAN bug reporting to allow instrumented
> +         * spinlock functions to access aux_lock, which resides inside of a
> +         * redzone.
> +         */
> +        kasan_disable_current();
> +        spin_lock_irqsave(&alloc_meta->aux_lock, flags);
> +        old_handle = alloc_meta->aux_stack[1];
>          alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
> -        alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
> +        alloc_meta->aux_stack[0] = new_handle;
> +        spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
> +        kasan_enable_current();
> +
> +        stack_depot_put(old_handle);
>  }
>
>  void kasan_record_aux_stack(void *addr)
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 5e298e3ac909..8b4125fecdc7 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -6,6 +6,7 @@
>  #include <linux/kasan.h>
>  #include <linux/kasan-tags.h>
>  #include <linux/kfence.h>
> +#include <linux/spinlock.h>
>  #include <linux/stackdepot.h>
>
>  #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
> @@ -249,6 +250,7 @@ struct kasan_global {
>  struct kasan_alloc_meta {
>          struct kasan_track alloc_track;
>          /* Free track is stored in kasan_free_meta. */
> +        spinlock_t aux_lock;

This needs to be raw_spinlock, because
kasan_record_aux_stack_noalloc() can be called from non-sleepable
contexts (otherwise lockdep will complain for RT kernels).
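[Editorial note: a minimal userspace model of the pre-patch race described in
the commit message above. This is not kernel code and not from the thread; the
handle values, array sizes, and thread setup are invented for illustration.
If both threads read slot [1] before either writes it, the same handle is
"put" twice, underflowing its refcount — the interleaving is nondeterministic,
so the underflow may need several runs to reproduce. Build with: cc -pthread.]

```c
#include <pthread.h>
#include <stdio.h>

static int refcount[8] = { [1] = 1, [2] = 1 }; /* handles 1 and 2 are live */
static int aux_stack[2] = { 1, 2 };            /* [0] = newest, [1] = oldest */

static void *record_aux_stack(void *arg)
{
        int new_handle = (int)(long)arg;

        /* Pre-patch rotation: three unsynchronized steps. */
        int old_handle = aux_stack[1];  /* both threads may read handle 2 */
        aux_stack[1] = aux_stack[0];
        aux_stack[0] = new_handle;
        /* Atomic decrement, so the bug shown is the double read above,
         * not a torn refcount update. */
        __atomic_fetch_sub(&refcount[old_handle], 1, __ATOMIC_RELAXED);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, record_aux_stack, (void *)3L);
        pthread_create(&t2, NULL, record_aux_stack, (void *)4L);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        /* Prints -1 when the racy interleaving hits: a refcount underflow. */
        printf("refcount of handle 2: %d\n", refcount[2]);
        return 0;
}
```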
On Thu, Dec 14, 2023 at 9:35 AM Marco Elver <elver@google.com> wrote:
> > #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
> > @@ -249,6 +250,7 @@ struct kasan_global {
> >  struct kasan_alloc_meta {
> >          struct kasan_track alloc_track;
> >          /* Free track is stored in kasan_free_meta. */
> > +        spinlock_t aux_lock;
>
> This needs to be raw_spinlock, because
> kasan_record_aux_stack_noalloc() can be called from non-sleepable
> contexts (otherwise lockdep will complain for RT kernels).

Right, will fix in v3. Thank you!
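[Editorial note: a sketch of what the agreed conversion might look like in v3;
the actual v3 patch may differ in detail. On PREEMPT_RT, spinlock_t becomes a
sleeping lock, while raw_spinlock_t remains a true spinning lock, so only the
latter is safe in the non-sleepable contexts that reach
kasan_record_aux_stack_noalloc().]

```c
/* Hypothetical spinlock_t -> raw_spinlock_t conversion, as discussed. */
struct kasan_alloc_meta {
        struct kasan_track alloc_track;
        /* Free track is stored in kasan_free_meta. */
        raw_spinlock_t aux_lock;
        depot_stack_handle_t aux_stack[2];
};

/* ...with the matching raw_* lock operations replacing the spin_* ones: */
raw_spin_lock_init(&alloc_meta->aux_lock);
raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
/* rotate aux_stack[] handles */
raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
```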
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 54e20b2bc3e1..b9d41d6c70fd 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/stackdepot.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
@@ -471,8 +472,18 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
         struct kasan_free_meta *free_meta;

         alloc_meta = kasan_get_alloc_meta(cache, object);
-        if (alloc_meta)
+        if (alloc_meta) {
                 __memset(alloc_meta, 0, sizeof(*alloc_meta));
+
+                /*
+                 * Temporarily disable KASAN bug reporting to allow instrumented
+                 * spin_lock_init to access aux_lock, which resides inside of a
+                 * redzone.
+                 */
+                kasan_disable_current();
+                spin_lock_init(&alloc_meta->aux_lock);
+                kasan_enable_current();
+        }
         free_meta = kasan_get_free_meta(cache, object);
         if (free_meta)
                 __memset(free_meta, 0, sizeof(*free_meta));
@@ -502,6 +513,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
         struct kmem_cache *cache;
         struct kasan_alloc_meta *alloc_meta;
         void *object;
+        depot_stack_handle_t new_handle, old_handle;
+        unsigned long flags;

         if (is_kfence_address(addr) || !slab)
                 return;
@@ -512,9 +525,22 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
         if (!alloc_meta)
                 return;

-        stack_depot_put(alloc_meta->aux_stack[1]);
+        new_handle = kasan_save_stack(0, depot_flags);
+
+        /*
+         * Temporarily disable KASAN bug reporting to allow instrumented
+         * spinlock functions to access aux_lock, which resides inside of a
+         * redzone.
+         */
+        kasan_disable_current();
+        spin_lock_irqsave(&alloc_meta->aux_lock, flags);
+        old_handle = alloc_meta->aux_stack[1];
         alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-        alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
+        alloc_meta->aux_stack[0] = new_handle;
+        spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
+        kasan_enable_current();
+
+        stack_depot_put(old_handle);
 }

 void kasan_record_aux_stack(void *addr)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 5e298e3ac909..8b4125fecdc7 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,6 +6,7 @@
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
+#include <linux/spinlock.h>
 #include <linux/stackdepot.h>

 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -249,6 +250,7 @@ struct kasan_global {
 struct kasan_alloc_meta {
         struct kasan_track alloc_track;
         /* Free track is stored in kasan_free_meta. */
+        spinlock_t aux_lock;
         depot_stack_handle_t aux_stack[2];
 };
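[Editorial note: one design point worth calling out from the diff. aux_lock
lives inside the object's redzone, so the compiler-instrumented spinlock code
touching it would itself trigger KASAN reports. The patch therefore brackets
every lock access with kasan_disable_current()/kasan_enable_current(), which
on Generic and SW_TAGS KASAN adjust current->kasan_depth to suppress reports
for the current task. The pattern in isolation, as a reading aid only (an
extracted fragment, not compilable on its own):]

```c
kasan_disable_current();                 /* current->kasan_depth++ */
spin_lock_irqsave(&alloc_meta->aux_lock, flags);
/* ... access metadata that resides inside a redzone ... */
spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
kasan_enable_current();                  /* current->kasan_depth-- */
```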