
[09/11] kasan: fix memory corruption in kasan_bitops_tags test

Message ID: 0c51a7266ea851797dc9816405fc40d860a48db1.1609871239.git.andreyknvl@google.com (mailing list archive)
State: New, archived
Series: kasan: HW_TAGS tests support and fixes

Commit Message

Andrey Konovalov Jan. 5, 2021, 6:27 p.m. UTC
Since the hardware tag-based KASAN mode might not have a redzone that
comes after an allocated object (when kasan.mode=prod is enabled), the
kasan_bitops_tags() test ends up corrupting the next object in memory.

Change the test so it always accesses the redzone that lies within the
allocated object's boundaries.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Link: https://linux-review.googlesource.com/id/I67f51d1ee48f0a8d0fe2658c2a39e4879fe0832a
---
 lib/test_kasan.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
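
For context, a minimal userspace sketch (not part of the patch) of the
offset arithmetic behind the fix: a bitop with bit number nr touches the
byte at addr + nr / BITS_PER_BYTE, so the old test's access at &bits[1]
with nr = BITS_PER_LONG landed at offset 16, the first byte past the
16-byte KASAN granule that the 8-byte allocation was rounded up to. The
48 and 64 below mirror the patch; everything else is illustrative.

/*
 * Where the bitop accesses land, assuming 64-bit longs and a 16-byte
 * KASAN granule (both hold on arm64, which HW_TAGS targets).
 */
#include <stdio.h>

#define BITS_PER_BYTE	8
#define BITS_PER_LONG	(int)(8 * sizeof(long))

int main(void)
{
	/* Old test: the base was &bits[1], i.e. offset 8 into the object. */
	int old_off = 8 + BITS_PER_LONG / BITS_PER_BYTE;	/* 16 */
	/* New test: the base is (void *)bits + 48. */
	int new_off = 48 + BITS_PER_LONG / BITS_PER_BYTE;	/* 56 */

	printf("old access at offset %d: past the object; with no redzone, the next object\n",
	       old_off);
	printf("new access at offset %d: inside the kmalloc-64 object's poisoned last 16 bytes\n",
	       new_off);
	return 0;
}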

Comments

Alexander Potapenko Jan. 12, 2021, 8:30 a.m. UTC | #1
On Tue, Jan 5, 2021 at 7:28 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> Since the hardware tag-based KASAN mode might not have a redzone that
> comes after an allocated object (when kasan.mode=prod is enabled), the
> kasan_bitops_tags() test ends up corrupting the next object in memory.
>
> Change the test so it always accesses the redzone that lies within the
> allocated object's boundaries.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Link: https://linux-review.googlesource.com/id/I67f51d1ee48f0a8d0fe2658c2a39e4879fe0832a
> ---
>  lib/test_kasan.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index b67da7f6e17f..3ea52da52714 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -771,17 +771,17 @@ static void kasan_bitops_tags(struct kunit *test)
>
>         /* This test is specifically crafted for the tag-based mode. */
>         if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> -               kunit_info(test, "skipping, CONFIG_KASAN_SW_TAGS required");
> +               kunit_info(test, "skipping, CONFIG_KASAN_SW/HW_TAGS required");
>                 return;
>         }
>
> -       /* Allocation size will be rounded to up granule size, which is 16. */
> -       bits = kzalloc(sizeof(*bits), GFP_KERNEL);
> +       /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
> +       bits = kzalloc(48, GFP_KERNEL);

I think it might make sense to call ksize() here to ensure we have
these spare bytes.
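
As a purely illustrative sketch, the suggested check could look something
like this (KUNIT_ASSERT_GE is an existing KUnit binary assertion; the
replies below explain why the idea was dropped):

	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
	/* Hypothetical: assert the slab object really has 64 usable bytes. */
	KUNIT_ASSERT_GE(test, ksize(bits), (size_t)64);
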
Marco Elver Jan. 12, 2021, 1:55 p.m. UTC | #2
On Tue, Jan 05, 2021 at 07:27PM +0100, Andrey Konovalov wrote:
> Since the hardware tag-based KASAN mode might not have a redzone that
> comes after an allocated object (when kasan.mode=prod is enabled), the
> kasan_bitops_tags() test ends up corrupting the next object in memory.
> 
> Change the test so it always accesses the redzone that lies within the
> allocated object's boundaries.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Link: https://linux-review.googlesource.com/id/I67f51d1ee48f0a8d0fe2658c2a39e4879fe0832a

Reviewed-by: Marco Elver <elver@google.com>

> ---
>  lib/test_kasan.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index b67da7f6e17f..3ea52da52714 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -771,17 +771,17 @@ static void kasan_bitops_tags(struct kunit *test)
>  
>  	/* This test is specifically crafted for the tag-based mode. */
>  	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> -		kunit_info(test, "skipping, CONFIG_KASAN_SW_TAGS required");
> +		kunit_info(test, "skipping, CONFIG_KASAN_SW/HW_TAGS required");
>  		return;
>  	}
>  
> -	/* Allocation size will be rounded to up granule size, which is 16. */
> -	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
> +	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
> +	bits = kzalloc(48, GFP_KERNEL);
>  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
>  
> -	/* Do the accesses past the 16 allocated bytes. */
> -	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
> -	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
> +	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
> +	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
> +	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
>  
>  	kfree(bits);
>  }
> -- 
> 2.29.2.729.g45daf8777d-goog
>
Andrey Konovalov Jan. 12, 2021, 8:06 p.m. UTC | #3
On Tue, Jan 12, 2021 at 9:30 AM Alexander Potapenko <glider@google.com> wrote:
>
> On Tue, Jan 5, 2021 at 7:28 PM Andrey Konovalov <andreyknvl@google.com> wrote:
> >
> > Since the hardware tag-based KASAN mode might not have a redzone that
> > comes after an allocated object (when kasan.mode=prod is enabled), the
> > kasan_bitops_tags() test ends up corrupting the next object in memory.
> >
> > Change the test so it always accesses the redzone that lies within the
> > allocated object's boundaries.
> >
> > Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> > Link: https://linux-review.googlesource.com/id/I67f51d1ee48f0a8d0fe2658c2a39e4879fe0832a
> > ---
> >  lib/test_kasan.c | 12 ++++++------
> >  1 file changed, 6 insertions(+), 6 deletions(-)
> >
> > diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> > index b67da7f6e17f..3ea52da52714 100644
> > --- a/lib/test_kasan.c
> > +++ b/lib/test_kasan.c
> > @@ -771,17 +771,17 @@ static void kasan_bitops_tags(struct kunit *test)
> >
> >         /* This test is specifically crafted for the tag-based mode. */
> >         if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > -               kunit_info(test, "skipping, CONFIG_KASAN_SW_TAGS required");
> > +               kunit_info(test, "skipping, CONFIG_KASAN_SW/HW_TAGS required");
> >                 return;
> >         }
> >
> > -       /* Allocation size will be rounded to up granule size, which is 16. */
> > -       bits = kzalloc(sizeof(*bits), GFP_KERNEL);
> > +       /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
> > +       bits = kzalloc(48, GFP_KERNEL);
>
> I think it might make sense to call ksize() here to ensure we have
> these spare bytes.

Calling ksize() will unpoison the whole object.

I think it's OK to make assumptions about KASAN internals in tests. I
would actually say that we need more tests that check such internal
properties.

Thanks!
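
For reference, a paraphrased sketch of the ksize() unpoisoning Andrey
mentions (based on mm/slab_common.c around v5.11; not verbatim, and the
helper names have shifted between versions):

size_t ksize(const void *objp)
{
	size_t size;

	/* First validate the pointer, only then unpoison. */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
		return 0;

	size = __ksize(objp);
	/*
	 * Callers may legitimately use the whole allocated area after
	 * ksize(), so KASAN unpoisons all of it -- including the
	 * in-object redzone that kasan_bitops_tags() deliberately
	 * trips over.
	 */
	kasan_unpoison_range(objp, size);
	return size;
}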
Alexander Potapenko Jan. 13, 2021, 12:30 p.m. UTC | #4
On Tue, Jan 12, 2021 at 9:07 PM 'Andrey Konovalov' via kasan-dev
<kasan-dev@googlegroups.com> wrote:
>
> On Tue, Jan 12, 2021 at 9:30 AM Alexander Potapenko <glider@google.com> wrote:
> >
> > On Tue, Jan 5, 2021 at 7:28 PM Andrey Konovalov <andreyknvl@google.com> wrote:
> > >
> > > Since the hardware tag-based KASAN mode might not have a redzone that
> > > comes after an allocated object (when kasan.mode=prod is enabled), the
> > > kasan_bitops_tags() test ends up corrupting the next object in memory.
> > >
> > > Change the test so it always accesses the redzone that lies within the
> > > allocated object's boundaries.
> > >
> > > Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> > > Link: https://linux-review.googlesource.com/id/I67f51d1ee48f0a8d0fe2658c2a39e4879fe0832a
Reviewed-by: Alexander Potapenko <glider@google.com>

> > > ---
> > >  lib/test_kasan.c | 12 ++++++------
> > >  1 file changed, 6 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> > > index b67da7f6e17f..3ea52da52714 100644
> > > --- a/lib/test_kasan.c
> > > +++ b/lib/test_kasan.c
> > > @@ -771,17 +771,17 @@ static void kasan_bitops_tags(struct kunit *test)
> > >
> > >         /* This test is specifically crafted for the tag-based mode. */
> > >         if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > -               kunit_info(test, "skipping, CONFIG_KASAN_SW_TAGS required");
> > > +               kunit_info(test, "skipping, CONFIG_KASAN_SW/HW_TAGS required");
> > >                 return;
> > >         }
> > >
> > > -       /* Allocation size will be rounded to up granule size, which is 16. */
> > > -       bits = kzalloc(sizeof(*bits), GFP_KERNEL);
> > > +       /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
> > > +       bits = kzalloc(48, GFP_KERNEL);
> >
> > I think it might make sense to call ksize() here to ensure we have
> > these spare bytes.
>
> Calling ksize() will unpoison the whole object.

Ah, that's right.

> I think it's OK to make assumptions about KASAN internals in tests. I
> would actually say that we need more tests that check such internal
> properties.

Agreed.

Patch

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index b67da7f6e17f..3ea52da52714 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -771,17 +771,17 @@  static void kasan_bitops_tags(struct kunit *test)
 
 	/* This test is specifically crafted for the tag-based mode. */
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
-		kunit_info(test, "skipping, CONFIG_KASAN_SW_TAGS required");
+		kunit_info(test, "skipping, CONFIG_KASAN_SW/HW_TAGS required");
 		return;
 	}
 
-	/* Allocation size will be rounded to up granule size, which is 16. */
-	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
+	bits = kzalloc(48, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
 
-	/* Do the accesses past the 16 allocated bytes. */
-	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
-	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
+	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
+	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
+	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
 
 	kfree(bits);
 }