[11/11] kasan: add proper page allocator tests

Message ID 15ca1976b26aa9edcec4a9d0f3b73f5b6536e5d0.1609871239.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series kasan: HW_TAGS tests support and fixes

Commit Message

Andrey Konovalov Jan. 5, 2021, 6:27 p.m. UTC
The existing page allocator tests rely on the kmalloc fallback to the
page allocator for large sizes, which is only implemented for SLUB.
Add proper tests that use alloc_pages() and free_pages().

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570
---
 lib/test_kasan.c | 54 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 49 insertions(+), 5 deletions(-)
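
For context, a minimal sketch (not part of the patch) of the allocation paths involved, assuming SLUB is the configured allocator. KMALLOC_MAX_CACHE_SIZE is the real slab.h threshold above which SLUB's kmalloc() stops using slab caches; the helper name allocation_paths_sketch() is made up for illustration:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    static void allocation_paths_sketch(void)
    {
    	void *slab_backed, *fallback;
    	struct page *pages;

    	/* Served from a kmalloc slab cache; covered by the classic tests. */
    	slab_backed = kmalloc(128, GFP_KERNEL);

    	/*
    	 * Larger than any kmalloc cache: SLUB falls back to the page
    	 * allocator. The kmalloc_pagealloc_* tests depend on this
    	 * fallback, and SLAB has none, hence their SLUB-only guard.
    	 */
    	fallback = kmalloc(KMALLOC_MAX_CACHE_SIZE + 10, GFP_KERNEL);

    	/* The new pagealloc_* tests hit the page allocator directly. */
    	pages = alloc_pages(GFP_KERNEL, 4);

    	kfree(slab_backed);
    	kfree(fallback);
    	if (pages)
    		__free_pages(pages, 4);
    }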

Comments

Alexander Potapenko Jan. 12, 2021, 8:57 a.m. UTC | #1
On Tue, Jan 5, 2021 at 7:28 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> The existing page allocator tests rely on the kmalloc fallback to the
> page allocator for large sizes, which is only implemented for SLUB.
> Add proper tests that use alloc_pages() and free_pages().
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570
Reviewed-by: Alexander Potapenko <glider@google.com>
Marco Elver Jan. 12, 2021, 2:34 p.m. UTC | #2
On Tue, Jan 05, 2021 at 07:27PM +0100, Andrey Konovalov wrote:
> The existing page allocator tests rely on the kmalloc fallback to the
> page allocator for large sizes, which is only implemented for SLUB.
> Add proper tests that use alloc_pages() and free_pages().
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570

Reviewed-by: Marco Elver <elver@google.com>


Patch

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 6261521e57ad..24798c034d05 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -128,6 +128,12 @@ static void kmalloc_node_oob_right(struct kunit *test)
 	kfree(ptr);
 }
 
+/*
+ * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
+ * fit into a slab cache and therefore is allocated via the page allocator
+ * fallback. Since this kind of fallback is only implemented for SLUB, these
+ * tests are limited to that allocator.
+ */
 static void kmalloc_pagealloc_oob_right(struct kunit *test)
 {
 	char *ptr;
@@ -138,14 +144,11 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
 		return;
 	}
 
-	/*
-	 * Allocate a chunk that does not fit into a SLUB cache to trigger
-	 * the page allocator fallback.
-	 */
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
+
 	kfree(ptr);
 }
 
@@ -161,8 +164,8 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
 	kfree(ptr);
+
 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
 }
 
@@ -182,6 +185,45 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
 }
 
+static void pagealloc_oob_right(struct kunit *test)
+{
+	char *ptr;
+	struct page *pages;
+	size_t order = 4;
+	size_t size = (1UL << (PAGE_SHIFT + order));
+
+	/*
+	 * With generic KASAN page allocations have no redzones, thus
+	 * out-of-bounds detection is not guaranteed.
+	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
+	 */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "skipping, CONFIG_KASAN_GENERIC enabled");
+		return;
+	}
+
+	pages = alloc_pages(GFP_KERNEL, order);
+	ptr = page_address(pages);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+	free_pages((unsigned long)ptr, order);
+}
+
+static void pagealloc_uaf(struct kunit *test)
+{
+	char *ptr;
+	struct page *pages;
+	size_t order = 4;
+
+	pages = alloc_pages(GFP_KERNEL, order);
+	ptr = page_address(pages);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+	free_pages((unsigned long)ptr, order);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+}
+
 static void kmalloc_large_oob_right(struct kunit *test)
 {
 	char *ptr;
@@ -933,6 +975,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_pagealloc_oob_right),
 	KUNIT_CASE(kmalloc_pagealloc_uaf),
 	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+	KUNIT_CASE(pagealloc_oob_right),
+	KUNIT_CASE(pagealloc_uaf),
 	KUNIT_CASE(kmalloc_large_oob_right),
 	KUNIT_CASE(kmalloc_oob_krealloc_more),
 	KUNIT_CASE(kmalloc_oob_krealloc_less),
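
A note on the CONFIG_KASAN_GENERIC skip in pagealloc_oob_right(): generic KASAN only places redzones around slab objects, while page allocations are merely poisoned when free. With order = 4 and 4 KiB pages, size = 1UL << (12 + 4) = 64 KiB, so ptr[size] is the first byte past the allocation; if the neighboring page happens to be allocated to someone else, generic KASAN sees an ordinary valid access and reports nothing (the bugzilla link above). The tag-based modes catch this case anyway, because each allocation gets its own random tag, checked on every access against the tag stored per 16-byte granule. A rough userspace model of that check (illustrative only; shadow, tagged_ptr, and tags_match are invented names, and real tags live in the pointer's top byte, enforced by MTE or compiler instrumentation):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define GRANULE 16 /* tag granule size in the tag-based KASAN modes */
    #define NGRAN   8

    static uint8_t shadow[NGRAN]; /* one tag byte per granule of memory */

    /* Stand-in for a tagged pointer: an address plus the tag it carries. */
    struct tagged_ptr { size_t addr; uint8_t tag; };

    /* An access is valid only if the pointer tag matches the memory tag. */
    static int tags_match(struct tagged_ptr p, size_t off)
    {
    	return shadow[(p.addr + off) / GRANULE] == p.tag;
    }

    int main(void)
    {
    	/* Two adjacent allocations receive different random tags. */
    	memset(&shadow[0], 0xAB, 4); /* allocation A: granules 0-3 (64 bytes) */
    	memset(&shadow[4], 0xCD, 4); /* allocation B: granules 4-7 */

    	struct tagged_ptr a = { .addr = 0, .tag = 0xAB };

    	/* Last byte of A: tags match. */
    	printf("ptr[size - 1]: %s\n", tags_match(a, 63) ? "ok" : "trap");
    	/* One past the end lands in B's first granule: tag mismatch. */
    	printf("ptr[size]:     %s\n", tags_match(a, 64) ? "ok" : "trap");
    	return 0;
    }

No redzone memory is needed in this scheme: the neighboring allocation's own tag is what triggers the report, which is why the out-of-bounds test runs only under the tag-based modes.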