kasan, slub: fix HW_TAGS zeroing with slub_debug

Message ID 678ac92ab790dba9198f9ca14f405651b97c8502.1688561016.git.andreyknvl@google.com

Commit Message

andrey.konovalov@linux.dev July 5, 2023, 12:44 p.m. UTC
From: Andrey Konovalov <andreyknvl@google.com>

Commit 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated
kmalloc space than requested") added precise kmalloc redzone poisoning
to the slub_debug functionality.

However, this commit didn't account for HW_TAGS KASAN fully initializing
the object via its built-in memory initialization feature. Even though
HW_TAGS KASAN memory initialization contains special handling for when
slub_debug is enabled, that handling does not account for in-object
slub_debug redzones: for kmalloc caches, these redzones now live inside
object_size, between the requested size and the end of the object. As a
result, HW_TAGS KASAN can overwrite these redzones and cause
false-positive slub_debug reports.
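
As a concrete illustration, consider kmalloc(24) served from the
kmalloc-32 cache: slub_debug redzones bytes 24..31 inside the object,
while HW_TAGS initialization zeroes all 32 bytes. A minimal userspace
sketch of this clash (illustrative only, not kernel code; the kmalloc-32
geometry is an assumption chosen for the example):

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: kmalloc(24) served from the kmalloc-32 cache. */
	unsigned long object_size = 32;	/* slot size of the cache */
	unsigned long orig_size = 24;	/* size the caller requested */

	/* slub_debug redzones the unused tail inside the object... */
	printf("in-object redzone:   bytes %lu..%lu\n",
	       orig_size, object_size - 1);

	/* ...but HW_TAGS KASAN's integrated init zeroes the whole object. */
	printf("HW_TAGS init zeroes: bytes 0..%lu\n", object_size - 1);

	printf("redzone bytes %lu..%lu get wiped -> false positive\n",
	       orig_size, object_size - 1);
	return 0;
}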

To fix the issue, avoid HW_TAGS KASAN memory initialization altogether
when slub_debug is enabled. Implement this by moving the
__slub_debug_enabled check from kasan_unpoison to slab_post_alloc_hook,
where the object can instead be zeroed with a memset of the precise size.
Common slab code seems like a more appropriate place for a slub_debug
check anyway.
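
To see the net effect on the zeroing decision, here is a condensed
userspace model of the patched logic (an approximation of the mm/slab.h
hunks, not a literal copy; the two loop variables stand in for
__slub_debug_enabled() and kasan_has_integrated_init()):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Zero-initialization is requested (init_on_alloc or __GFP_ZERO). */
	bool init = true;

	for (int dbg = 0; dbg <= 1; dbg++) {		/* slub_debug on? */
		for (int integ = 0; integ <= 1; integ++) {	/* integrated init? */
			/* The fix: slub_debug forces kasan_init off. */
			bool kasan_init = init && !dbg;

			/* KASAN zeroes the object only on this path... */
			bool kasan_zeroes = kasan_init && integ;
			/* ...otherwise the precise memset(zero_size) runs. */
			bool memset_runs = init && (!kasan_init || !integ);

			printf("slub_debug=%d integrated=%d -> kasan=%d memset=%d\n",
			       dbg, integ, kasan_zeroes, memset_runs);
		}
	}
	return 0;
}

Exactly one zeroing path runs in every case, and with slub_debug enabled
it is always the precise memset, which uses zero_size == orig_size for
kmalloc caches and therefore never touches the in-object redzone.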

Fixes: 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated kmalloc space than requested")
Cc: <stable@vger.kernel.org>
Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/kasan.h | 12 ------------
 mm/slab.h        | 16 ++++++++++++++--
 2 files changed, 14 insertions(+), 14 deletions(-)

Comments

Marco Elver July 5, 2023, 12:50 p.m. UTC | #1
On Wed, 5 Jul 2023 at 14:44, <andrey.konovalov@linux.dev> wrote:
> [...]
> Fixes: 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated kmalloc space than requested")
> Cc: <stable@vger.kernel.org>
> Reported-by: Mark Rutland <mark.rutland@arm.com>

Is it fixing this issue:

  https://lore.kernel.org/all/20230628154714.GB22090@willie-the-truck/

Or some other issue?

> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

Other than the question above, it looks sane:

Acked-by: Marco Elver <elver@google.com>

Andrey Konovalov July 5, 2023, 1:19 p.m. UTC | #2
On Wed, Jul 5, 2023 at 2:51 PM Marco Elver <elver@google.com> wrote:
>
> On Wed, 5 Jul 2023 at 14:44, <andrey.konovalov@linux.dev> wrote:
> > [...]
> > Reported-by: Mark Rutland <mark.rutland@arm.com>
>
> Is it fixing this issue:
>
>   https://lore.kernel.org/all/20230628154714.GB22090@willie-the-truck/

Yes, my bad, messed up the Reported-by tag. The correct one should be:

Reported-by: Will Deacon <will@kernel.org>

> Other than the question above, it looks sane:
>
> Acked-by: Marco Elver <elver@google.com>

Thank you, Marco!
Will Deacon July 6, 2023, 9:03 a.m. UTC | #3
On Wed, Jul 05, 2023 at 03:19:06PM +0200, Andrey Konovalov wrote:
> [...]
>
> Yes, my bad, messed up the Reported-by tag. The correct one should be:
> 
> Reported-by: Will Deacon <will@kernel.org>

Cheers, this seems to fix the splats for me:

Tested-by: Will Deacon <will@kernel.org>

Will
Vlastimil Babka July 10, 2023, 9:53 a.m. UTC | #4
On 7/5/23 14:44, andrey.konovalov@linux.dev wrote:
> [...]
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Thanks!

Patch

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index b799f11e45dc..2e973b36fe07 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,18 +466,6 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
-	/*
-	 * Explicitly initialize the memory with the precise object size to
-	 * avoid overwriting the slab redzone. This disables initialization in
-	 * the arch code and may thus lead to performance penalty. This penalty
-	 * does not affect production builds, as slab redzones are not enabled
-	 * there.
-	 */
-	if (__slub_debug_enabled() &&
-	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
-		init = false;
-		memzero_explicit((void *)addr, size);
-	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
diff --git a/mm/slab.h b/mm/slab.h
index 6a5633b25eb5..9c0e09d0f81f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -723,6 +723,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					unsigned int orig_size)
 {
 	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
@@ -739,6 +740,17 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	    (s->flags & SLAB_KMALLOC))
 		zero_size = orig_size;
 
+	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
@@ -747,8 +759,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
 	 */
 	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, init);
-		if (p[i] && init && !kasan_has_integrated_init())
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
 			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
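
For completeness, a hedged note on reproducing the original splat (an
assumption based on the report linked earlier in the thread, not
something stated by the patch itself): boot an arm64 kernel built with
CONFIG_KASAN_HW_TAGS=y on MTE-capable hardware, with redzoning and
zero-initialization enabled on the command line:

	slub_debug=Z init_on_alloc=1

Without this patch, slub_debug should then flag redzone corruption on
kmalloc allocations whose requested size is smaller than their cache's
object_size; with it, the precise memset leaves the in-object redzone
intact.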