[5/5] mm/slub, kunit: Add testcase for krealloc redzone and zeroing

Message ID 20240909012958.913438-6-feng.tang@intel.com (mailing list archive)
State New
Series mm/slub: Improve data handling of krealloc() when orig_size is enabled

Commit Message

Feng Tang Sept. 9, 2024, 1:29 a.m. UTC
Danilo Krummrich raised an issue about krealloc+GFP_ZERO [1], and Vlastimil
suggested adding a test case which can sanity-check the kmalloc redzone
and zeroing by utilizing kmalloc's 'orig_size' debug feature.

It covers the grow and shrink cases of krealloc() re-using the current
kmalloc object, and the case of re-allocating a new, bigger object.

Users can add the "slub_debug" kernel cmdline parameter to test it.

[1]. https://lore.kernel.org/lkml/20240812223707.32049-1-dakr@kernel.org/

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Feng Tang <feng.tang@intel.com>
---
 lib/slub_kunit.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

Comments

Danilo Krummrich Sept. 10, 2024, 10:09 a.m. UTC | #1
On Mon, Sep 09, 2024 at 09:29:58AM +0800, Feng Tang wrote:
> Danilo Krummrich raised issue about krealloc+GFP_ZERO [1], and Vlastimil
> suggested to add some test case which can sanity test the kmalloc-redzone
> and zeroing by utilizing the kmalloc's 'orig_size' debug feature.
> 
> It covers the grow and shrink case of krealloc() re-using current kmalloc
> object, and the case of re-allocating a new bigger object.
> 
> User can add "slub_debug" kernel cmdline parameter to test it.
> 
> [1]. https://lore.kernel.org/lkml/20240812223707.32049-1-dakr@kernel.org/
> 
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Feng Tang <feng.tang@intel.com>

Reviewed-by: Danilo Krummrich <dakr@kernel.org>

> ---
>  lib/slub_kunit.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 46 insertions(+)
> 
> diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
> index 6e3a1e5a7142..03e0089149ad 100644
> --- a/lib/slub_kunit.c
> +++ b/lib/slub_kunit.c
> @@ -186,6 +186,51 @@ static void test_leak_destroy(struct kunit *test)
>  	KUNIT_EXPECT_EQ(test, 1, slab_errors);
>  }
>  
> +static void test_krealloc_redzone_zeroing(struct kunit *test)
> +{
> +	char *p;
> +	int i;
> +
> +	KUNIT_TEST_REQUIRES(test, __slub_debug_enabled());
> +
> +	/* Allocate a 64B kmalloc object */
> +	p = kzalloc(48, GFP_KERNEL);
> +	if (unlikely(is_kfence_address(p))) {
> +		kfree(p);
> +		return;
> +	}
> +	memset(p, 0xff, 48);
> +
> +	kasan_disable_current();
> +	OPTIMIZER_HIDE_VAR(p);
> +
> +	/* Test shrink */
> +	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
> +	for (i = 40; i < 64; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> +
> +	/* Test grow within the same 64B kmalloc object */
> +	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
> +	for (i = 40; i < 56; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], 0);
> +	for (i = 56; i < 64; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> +
> +	/* Test grow with allocating a bigger 128B object */
> +	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
> +	if (unlikely(is_kfence_address(p)))
> +		goto exit;
> +
> +	for (i = 56; i < 112; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], 0);
> +	for (i = 112; i < 128; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> +
> +exit:
> +	kfree(p);
> +	kasan_enable_current();
> +}
> +
>  static int test_init(struct kunit *test)
>  {
>  	slab_errors = 0;
> @@ -196,6 +241,7 @@ static int test_init(struct kunit *test)
>  }
>  
>  static struct kunit_case test_cases[] = {
> +	KUNIT_CASE(test_krealloc_redzone_zeroing),
>  	KUNIT_CASE(test_clobber_zone),
>  
>  #ifndef CONFIG_KASAN
> -- 
> 2.34.1
>
Vlastimil Babka Sept. 10, 2024, 1:29 p.m. UTC | #2
On 9/9/24 03:29, Feng Tang wrote:
> Danilo Krummrich raised issue about krealloc+GFP_ZERO [1], and Vlastimil
> suggested to add some test case which can sanity test the kmalloc-redzone
> and zeroing by utilizing the kmalloc's 'orig_size' debug feature.
> 
> It covers the grow and shrink case of krealloc() re-using current kmalloc
> object, and the case of re-allocating a new bigger object.
> 
> User can add "slub_debug" kernel cmdline parameter to test it.
> 
> [1]. https://lore.kernel.org/lkml/20240812223707.32049-1-dakr@kernel.org/
> 
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Feng Tang <feng.tang@intel.com>
> ---
>  lib/slub_kunit.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 46 insertions(+)
> 
> diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
> index 6e3a1e5a7142..03e0089149ad 100644
> --- a/lib/slub_kunit.c
> +++ b/lib/slub_kunit.c
> @@ -186,6 +186,51 @@ static void test_leak_destroy(struct kunit *test)
>  	KUNIT_EXPECT_EQ(test, 1, slab_errors);
>  }
>  
> +static void test_krealloc_redzone_zeroing(struct kunit *test)
> +{
> +	char *p;
> +	int i;
> +
> +	KUNIT_TEST_REQUIRES(test, __slub_debug_enabled());

AFAICS this is insufficient, because the static key may be enabled due to
debugging being enabled for caches other than kmalloc, or the enabled options
might not include both red zoning and object tracking.

But it should be possible to instead create a fake kmalloc cache of size 64
and use __kmalloc_cache_noprof() like test_kmalloc_redzone_access()?
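
Roughly something like the following untested sketch. It assumes the existing
test_kmem_cache_create() helper and validate-style setup already used by
test_kmalloc_redzone_access() in lib/slub_kunit.c; the cache name and the
exact flag set are just illustrative:

static void test_krealloc_redzone_zeroing(struct kunit *test)
{
	u8 *p;
	int i;
	/* fake 64B kmalloc cache with redzone + user tracking enabled */
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
				SLAB_KMALLOC | SLAB_STORE_USER | SLAB_RED_ZONE);

	p = __kmalloc_cache_noprof(s, GFP_KERNEL, 48);
	memset(p, 0xff, 48);

	kasan_disable_current();
	OPTIMIZER_HIDE_VAR(p);

	/* shrink 48 -> 40: bytes 40..63 should now be redzoned */
	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	/* grow in place 40 -> 56: bytes 40..55 zeroed, 56..63 still redzoned */
	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);
	for (i = 56; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	kasan_enable_current();
	kfree(p);
	kmem_cache_destroy(s);
}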

> +
> +	/* Allocate a 64B kmalloc object */
> +	p = kzalloc(48, GFP_KERNEL);
> +	if (unlikely(is_kfence_address(p))) {
> +		kfree(p);
> +		return;
> +	}
> +	memset(p, 0xff, 48);
> +
> +	kasan_disable_current();
> +	OPTIMIZER_HIDE_VAR(p);
> +
> +	/* Test shrink */
> +	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
> +	for (i = 40; i < 64; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> +
> +	/* Test grow within the same 64B kmalloc object */
> +	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
> +	for (i = 40; i < 56; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], 0);
> +	for (i = 56; i < 64; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> +
> +	/* Test grow with allocating a bigger 128B object */
> +	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);

The only downside is that krealloc() here might use a kmalloc-128 cache that's
not doing red zoning and object tracking...

> +	if (unlikely(is_kfence_address(p)))
> +		goto exit;
> +
> +	for (i = 56; i < 112; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], 0);

... but this test is still valid and necessary

> +	for (i = 112; i < 128; i++)
> +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

... we might skip this test as the red zoning is not done by __do_krealloc()
anyway in the alloc_new case.

> +
> +exit:
> +	kfree(p);

Ideally we'd also validate the fake kmalloc cache we created and expect zero
slab_errors.
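
i.e., in the sketch above, right before the kasan_enable_current() /
kfree() / kmem_cache_destroy() cleanup, something like:

	/* the krealloc() calls above must leave the redzones intact */
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);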

Hopefully this approach works and I'm not missing something...

> +	kasan_enable_current();
> +}
> +
>  static int test_init(struct kunit *test)
>  {
>  	slab_errors = 0;
> @@ -196,6 +241,7 @@ static int test_init(struct kunit *test)
>  }
>  
>  static struct kunit_case test_cases[] = {
> +	KUNIT_CASE(test_krealloc_redzone_zeroing),
>  	KUNIT_CASE(test_clobber_zone),
>  
>  #ifndef CONFIG_KASAN
Feng Tang Sept. 10, 2024, 2:08 p.m. UTC | #3
On Tue, Sep 10, 2024 at 03:29:21PM +0200, Vlastimil Babka wrote:
> On 9/9/24 03:29, Feng Tang wrote:
> > Danilo Krummrich raised issue about krealloc+GFP_ZERO [1], and Vlastimil
> > suggested to add some test case which can sanity test the kmalloc-redzone
> > and zeroing by utilizing the kmalloc's 'orig_size' debug feature.
> > 
> > It covers the grow and shrink case of krealloc() re-using current kmalloc
> > object, and the case of re-allocating a new bigger object.
> > 
> > User can add "slub_debug" kernel cmdline parameter to test it.
> > 
> > [1]. https://lore.kernel.org/lkml/20240812223707.32049-1-dakr@kernel.org/
> > 
> > Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> > Signed-off-by: Feng Tang <feng.tang@intel.com>
> > ---
> >  lib/slub_kunit.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 46 insertions(+)
> > 
> > diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
> > index 6e3a1e5a7142..03e0089149ad 100644
> > --- a/lib/slub_kunit.c
> > +++ b/lib/slub_kunit.c
> > @@ -186,6 +186,51 @@ static void test_leak_destroy(struct kunit *test)
> >  	KUNIT_EXPECT_EQ(test, 1, slab_errors);
> >  }
> >  
> > +static void test_krealloc_redzone_zeroing(struct kunit *test)
> > +{
> > +	char *p;
> > +	int i;
> > +
> > +	KUNIT_TEST_REQUIRES(test, __slub_debug_enabled());
> 
> AFAICS this is insufficient, because the static key may be enabled due to
> debugging enabled for different caches than kmalloc, or it might not include
> both red zone and object tracking.

You are right, that concerned me too. In the first version, I made it depend
on CONFIG_SLUB_DEBUG_ON=y, but most users' and distributions' kernels
won't enable it, and users would have to rebuild the kernel to test. So I
changed to this check in the end.

If there were a way to check whether 'slub_debug' is enabled, that would
solve this issue.

> 
> But it should be possible to instead create a fake kmalloc cache of size 64
> and use __kmalloc_cache_noprof() like test_kmalloc_redzone_access()?

Yep, I thought about that, and the problem was the case where krealloc()
allocates a new 128B object.

> > +
> > +	/* Allocate a 64B kmalloc object */
> > +	p = kzalloc(48, GFP_KERNEL);
> > +	if (unlikely(is_kfence_address(p))) {
> > +		kfree(p);
> > +		return;
> > +	}
> > +	memset(p, 0xff, 48);
> > +
> > +	kasan_disable_current();
> > +	OPTIMIZER_HIDE_VAR(p);
> > +
> > +	/* Test shrink */
> > +	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
> > +	for (i = 40; i < 64; i++)
> > +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> > +
> > +	/* Test grow within the same 64B kmalloc object */
> > +	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
> > +	for (i = 40; i < 56; i++)
> > +		KUNIT_EXPECT_EQ(test, p[i], 0);
> > +	for (i = 56; i < 64; i++)
> > +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> > +
> > +	/* Test grow with allocating a bigger 128B object */
> > +	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
> 
> The only downside is that krealloc() here might use kmalloc-128 cache that's
> not doing red zoning and object tracking....

Yes.

> > +	if (unlikely(is_kfence_address(p)))
> > +		goto exit;
> > +
> > +	for (i = 56; i < 112; i++)
> > +		KUNIT_EXPECT_EQ(test, p[i], 0);
> 
> ... but this test is still valid and necessary
> 
> > +	for (i = 112; i < 128; i++)
> > +		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
> 
> ... we might skip this test as the red zoning is not done by __do_krealloc()
> anyway in the alloc_new case.
> 
> > +
> > +exit:
> > +	kfree(p);
> 
> Ideally we'd also validate the fake kmalloc cache we created and expect zero
> slab_errors.
> 
> Hopefully this approach works and I'm not missing something...

Yep, this should work. Since the redzone was tested in the earlier check, it's
not necessary to check it again here. Will do some tests on this.

Thanks,
Feng
Patch

diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 6e3a1e5a7142..03e0089149ad 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -186,6 +186,51 @@ static void test_leak_destroy(struct kunit *test)
 	KUNIT_EXPECT_EQ(test, 1, slab_errors);
 }
 
+static void test_krealloc_redzone_zeroing(struct kunit *test)
+{
+	char *p;
+	int i;
+
+	KUNIT_TEST_REQUIRES(test, __slub_debug_enabled());
+
+	/* Allocate a 64B kmalloc object */
+	p = kzalloc(48, GFP_KERNEL);
+	if (unlikely(is_kfence_address(p))) {
+		kfree(p);
+		return;
+	}
+	memset(p, 0xff, 48);
+
+	kasan_disable_current();
+	OPTIMIZER_HIDE_VAR(p);
+
+	/* Test shrink */
+	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
+	for (i = 40; i < 64; i++)
+		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+	/* Test grow within the same 64B kmalloc object */
+	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
+	for (i = 40; i < 56; i++)
+		KUNIT_EXPECT_EQ(test, p[i], 0);
+	for (i = 56; i < 64; i++)
+		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+	/* Test grow with allocating a bigger 128B object */
+	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
+	if (unlikely(is_kfence_address(p)))
+		goto exit;
+
+	for (i = 56; i < 112; i++)
+		KUNIT_EXPECT_EQ(test, p[i], 0);
+	for (i = 112; i < 128; i++)
+		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+exit:
+	kfree(p);
+	kasan_enable_current();
+}
+
 static int test_init(struct kunit *test)
 {
 	slab_errors = 0;
@@ -196,6 +241,7 @@ static int test_init(struct kunit *test)
 }
 
 static struct kunit_case test_cases[] = {
+	KUNIT_CASE(test_krealloc_redzone_zeroing),
 	KUNIT_CASE(test_clobber_zone),
 
 #ifndef CONFIG_KASAN