
[03/31] kasan, page_alloc: merge kasan_free_pages into free_pages_prepare

Message ID 64f8b74a4766f886a6df77438e7e098205fd0863.1638308023.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series kasan, vmalloc, arm64: add vmalloc tagging support for SW/HW_TAGS

Commit Message

andrey.konovalov@linux.dev Nov. 30, 2021, 9:39 p.m. UTC
From: Andrey Konovalov <andreyknvl@google.com>

Currently, the code responsible for initializing and poisoning memory
in free_pages_prepare() is scattered across two locations:
kasan_free_pages() for HW_TAGS KASAN and free_pages_prepare() itself.
This is confusing.

This and a few following patches combine the code from these two
locations. Along the way, these patches also simplify the performed
checks to make them easier to follow.

This patch replaces the only caller of kasan_free_pages() with its
implementation.

As kasan_has_integrated_init() is only true when CONFIG_KASAN_HW_TAGS
is enabled, moving the code causes no functional change.

This patch is not useful by itself but makes the simplifications in
the following patches easier to follow.
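
After this patch, the HW_TAGS branch in free_pages_prepare() reads roughly as
follows (a minimal sketch reconstructed from the mm/page_alloc.c hunk below,
not verbatim kernel source; the non-HW_TAGS branch is elided):

	if (kasan_has_integrated_init()) {
		/* Same condition the removed kasan_free_pages() checked internally. */
		bool init = want_init_on_free();

		/* Open-coded body of the former kasan_free_pages(page, order). */
		if (!skip_kasan_poison)
			kasan_poison_pages(page, order, init);
	} else {
		bool init = want_init_on_free();
		/* ... existing kernel_init_free_pages()/kasan_poison_pages() path ... */
	}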

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 include/linux/kasan.h |  8 --------
 mm/kasan/common.c     |  2 +-
 mm/kasan/hw_tags.c    | 11 -----------
 mm/page_alloc.c       |  6 ++++--
 4 files changed, 5 insertions(+), 22 deletions(-)

Comments

Alexander Potapenko Dec. 2, 2021, 3:32 p.m. UTC | #1
On Tue, Nov 30, 2021 at 10:40 PM <andrey.konovalov@linux.dev> wrote:
>
> From: Andrey Konovalov <andreyknvl@google.com>
>
> Currently, the code responsible for initializing and poisoning memory
> in free_pages_prepare() is scattered across two locations:
> kasan_free_pages() for HW_TAGS KASAN and free_pages_prepare() itself.
> This is confusing.
>
> This and a few following patches combine the code from these two
> locations. Along the way, these patches also simplify the performed
> checks to make them easier to follow.
>
> This patch replaces the only caller of kasan_free_pages() with its
> implementation.
>
> As kasan_has_integrated_init() is only true when CONFIG_KASAN_HW_TAGS
> is enabled, moving the code causes no functional change.
>
> This patch is not useful by itself but makes the simplifications in
> the following patches easier to follow.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>


Patch

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d8783b682669..89a43d8ae4fe 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -95,7 +95,6 @@ static inline bool kasan_hw_tags_enabled(void)
 }
 
 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
-void kasan_free_pages(struct page *page, unsigned int order);
 
 #else /* CONFIG_KASAN_HW_TAGS */
 
@@ -116,13 +115,6 @@ static __always_inline void kasan_alloc_pages(struct page *page,
 	BUILD_BUG();
 }
 
-static __always_inline void kasan_free_pages(struct page *page,
-					     unsigned int order)
-{
-	/* Only available for integrated init. */
-	BUILD_BUG();
-}
-
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_has_integrated_init(void)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 8428da2aaf17..66078cc1b4f0 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -387,7 +387,7 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 	}
 
 	/*
-	 * The object will be poisoned by kasan_free_pages() or
+	 * The object will be poisoned by kasan_poison_pages() or
 	 * kasan_slab_free_mempool().
 	 */
 
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 7355cb534e4f..0b8225add2e4 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -213,17 +213,6 @@ void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
 	}
 }
 
-void kasan_free_pages(struct page *page, unsigned int order)
-{
-	/*
-	 * This condition should match the one in free_pages_prepare() in
-	 * page_alloc.c.
-	 */
-	bool init = want_init_on_free();
-
-	kasan_poison_pages(page, order, init);
-}
-
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_enable_tagging_sync(void)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3589333b5b77..3f3ea41f8c64 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1353,15 +1353,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
 
 	/*
 	 * As memory initialization might be integrated into KASAN,
-	 * kasan_free_pages and kernel_init_free_pages must be
+	 * KASAN poisoning and memory initialization code must be
 	 * kept together to avoid discrepancies in behavior.
 	 *
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
 	if (kasan_has_integrated_init()) {
+		bool init = want_init_on_free();
+
 		if (!skip_kasan_poison)
-			kasan_free_pages(page, order);
+			kasan_poison_pages(page, order, init);
 	} else {
 		bool init = want_init_on_free();