
[v3,1/3] kasan: use separate (un)poison implementation for integrated init

Message ID 78af73393175c648b4eb10312825612f6e6889f6.1620849613.git.pcc@google.com (mailing list archive)
State New, archived
Series arm64: improve efficiency of setting tags for user pages

Commit Message

Peter Collingbourne May 12, 2021, 8:09 p.m. UTC
Currently with integrated init page_alloc.c needs to know whether
kasan_alloc_pages() will zero initialize memory, but this will start
becoming more complicated once we start adding tag initialization
support for user pages. To avoid page_alloc.c needing to know more
details of what integrated init will do, move the unpoisoning logic
for integrated init into the HW tags implementation. Currently the
logic is identical but it will diverge in subsequent patches.

For symmetry do the same for poisoning although this logic will
be unaffected by subsequent patches.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056
---
v3:
- use BUILD_BUG()

v2:
- fix build with KASAN disabled

 include/linux/kasan.h | 66 +++++++++++++++++++++++++++----------------
 mm/kasan/common.c     |  4 +--
 mm/kasan/hw_tags.c    | 14 +++++++++
 mm/mempool.c          |  6 ++--
 mm/page_alloc.c       | 56 +++++++++++++++++++-----------------
 5 files changed, 91 insertions(+), 55 deletions(-)

Comments

Andrey Konovalov May 25, 2021, 10 p.m. UTC | #1
On Wed, May 12, 2021 at 11:09 PM Peter Collingbourne <pcc@google.com> wrote:
>
> Currently with integrated init page_alloc.c needs to know whether
> kasan_alloc_pages() will zero initialize memory, but this will start
> becoming more complicated once we start adding tag initialization
> support for user pages. To avoid page_alloc.c needing to know more
> details of what integrated init will do, move the unpoisoning logic
> for integrated init into the HW tags implementation. Currently the
> logic is identical but it will diverge in subsequent patches.
>
> For symmetry do the same for poisoning although this logic will
> be unaffected by subsequent patches.
>
> Signed-off-by: Peter Collingbourne <pcc@google.com>
> Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056
> ---
> v3:
> - use BUILD_BUG()
>
> v2:
> - fix build with KASAN disabled
>
>  include/linux/kasan.h | 66 +++++++++++++++++++++++++++----------------
>  mm/kasan/common.c     |  4 +--
>  mm/kasan/hw_tags.c    | 14 +++++++++
>  mm/mempool.c          |  6 ++--
>  mm/page_alloc.c       | 56 +++++++++++++++++++-----------------
>  5 files changed, 91 insertions(+), 55 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index b1678a61e6a7..38061673e6ac 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -2,6 +2,7 @@
>  #ifndef _LINUX_KASAN_H
>  #define _LINUX_KASAN_H
>
> +#include <linux/bug.h>
>  #include <linux/static_key.h>
>  #include <linux/types.h>
>
> @@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
>
>  #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
>
> -#ifdef CONFIG_KASAN
> -
> -struct kasan_cache {
> -       int alloc_meta_offset;
> -       int free_meta_offset;
> -       bool is_kmalloc;
> -};
> -
>  #ifdef CONFIG_KASAN_HW_TAGS
>
>  DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
> @@ -101,11 +94,18 @@ static inline bool kasan_has_integrated_init(void)
>         return kasan_enabled();
>  }
>
> +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
> +void kasan_free_pages(struct page *page, unsigned int order);
> +
>  #else /* CONFIG_KASAN_HW_TAGS */
>
>  static inline bool kasan_enabled(void)
>  {
> +#ifdef CONFIG_KASAN
>         return true;
> +#else
> +       return false;
> +#endif
>  }
>
>  static inline bool kasan_has_integrated_init(void)
> @@ -113,8 +113,30 @@ static inline bool kasan_has_integrated_init(void)
>         return false;
>  }
>
> +static __always_inline void kasan_alloc_pages(struct page *page,
> +                                             unsigned int order, gfp_t flags)
> +{
> +       /* Only available for integrated init. */
> +       BUILD_BUG();
> +}
> +
> +static __always_inline void kasan_free_pages(struct page *page,
> +                                            unsigned int order)
> +{
> +       /* Only available for integrated init. */
> +       BUILD_BUG();
> +}
> +
>  #endif /* CONFIG_KASAN_HW_TAGS */
>
> +#ifdef CONFIG_KASAN
> +
> +struct kasan_cache {
> +       int alloc_meta_offset;
> +       int free_meta_offset;
> +       bool is_kmalloc;
> +};
> +
>  slab_flags_t __kasan_never_merge(void);
>  static __always_inline slab_flags_t kasan_never_merge(void)
>  {
> @@ -130,20 +152,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
>                 __kasan_unpoison_range(addr, size);
>  }
>
> -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
> -static __always_inline void kasan_alloc_pages(struct page *page,
> +void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
> +static __always_inline void kasan_poison_pages(struct page *page,
>                                                 unsigned int order, bool init)
>  {
>         if (kasan_enabled())
> -               __kasan_alloc_pages(page, order, init);
> +               __kasan_poison_pages(page, order, init);
>  }
>
> -void __kasan_free_pages(struct page *page, unsigned int order, bool init);
> -static __always_inline void kasan_free_pages(struct page *page,
> -                                               unsigned int order, bool init)
> +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
> +static __always_inline void kasan_unpoison_pages(struct page *page,
> +                                                unsigned int order, bool init)
>  {
>         if (kasan_enabled())
> -               __kasan_free_pages(page, order, init);
> +               __kasan_unpoison_pages(page, order, init);
>  }
>
>  void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> @@ -285,21 +307,15 @@ void kasan_restore_multi_shot(bool enabled);
>
>  #else /* CONFIG_KASAN */
>
> -static inline bool kasan_enabled(void)
> -{
> -       return false;
> -}
> -static inline bool kasan_has_integrated_init(void)
> -{
> -       return false;
> -}
>  static inline slab_flags_t kasan_never_merge(void)
>  {
>         return 0;
>  }
>  static inline void kasan_unpoison_range(const void *address, size_t size) {}
> -static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
> -static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
> +static inline void kasan_poison_pages(struct page *page, unsigned int order,
> +                                     bool init) {}
> +static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
> +                                       bool init) {}
>  static inline void kasan_cache_create(struct kmem_cache *cache,
>                                       unsigned int *size,
>                                       slab_flags_t *flags) {}
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 6bb87f2acd4e..0ecd293af344 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
>         return 0;
>  }
>
> -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
> +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
>  {
>         u8 tag;
>         unsigned long i;
> @@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
>         kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
>  }
>
> -void __kasan_free_pages(struct page *page, unsigned int order, bool init)
> +void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
>  {
>         if (likely(!PageHighMem(page)))
>                 kasan_poison(page_address(page), PAGE_SIZE << order,
> diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
> index 4004388b4e4b..45e552cb9172 100644
> --- a/mm/kasan/hw_tags.c
> +++ b/mm/kasan/hw_tags.c
> @@ -238,6 +238,20 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
>         return &alloc_meta->free_track[0];
>  }
>
> +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
> +{
> +       bool init = !want_init_on_free() && want_init_on_alloc(flags);

This check is now duplicated. One check here, the same one in
page_alloc.c. Please either add a helper that gets used in both
places, or at least a comment that the checks must be kept in sync.
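
Something along these lines would do (the helper name below is just for
illustration, it doesn't exist today):

	/*
	 * Illustrative sketch only: a single shared place for the "should
	 * integrated init zero this allocation?" decision, used by both
	 * kasan_alloc_pages() and post_alloc_hook() so the two checks
	 * cannot drift apart.
	 */
	static inline bool want_page_init_on_alloc(gfp_t flags)
	{
		return !want_init_on_free() && want_init_on_alloc(flags);
	}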

> +
> +       kasan_unpoison_pages(page, order, init);
> +}
> +
> +void kasan_free_pages(struct page *page, unsigned int order)
> +{
> +       bool init = want_init_on_free();

Same here.

> +
> +       kasan_poison_pages(page, order, init);
> +}
> +
>  #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
>
>  void kasan_set_tagging_report_once(bool state)
> diff --git a/mm/mempool.c b/mm/mempool.c
> index a258cf4de575..0b8afbec3e35 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
>         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
>                 kasan_slab_free_mempool(element);
>         else if (pool->alloc == mempool_alloc_pages)
> -               kasan_free_pages(element, (unsigned long)pool->pool_data, false);
> +               kasan_poison_pages(element, (unsigned long)pool->pool_data,
> +                                  false);
>  }
>
>  static void kasan_unpoison_element(mempool_t *pool, void *element)
> @@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
>         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
>                 kasan_unpoison_range(element, __ksize(element));
>         else if (pool->alloc == mempool_alloc_pages)
> -               kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
> +               kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
> +                                    false);
>  }
>
>  static __always_inline void add_element(mempool_t *pool, void *element)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index aaa1655cf682..6e82a7f6fd6f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
>  static DEFINE_STATIC_KEY_TRUE(deferred_pages);
>
>  /*
> - * Calling kasan_free_pages() only after deferred memory initialization
> + * Calling kasan_poison_pages() only after deferred memory initialization
>   * has completed. Poisoning pages during deferred memory init will greatly
>   * lengthen the process and cause problem in large memory systems as the
>   * deferred pages initialization is done with interrupt disabled.
> @@ -394,15 +394,11 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
>   * on-demand allocation and then freed again before the deferred pages
>   * initialization is done, but this is not likely to happen.
>   */
> -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> -                                               bool init, fpi_t fpi_flags)
> +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
>  {
> -       if (static_branch_unlikely(&deferred_pages))
> -               return;
> -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> -               return;
> -       kasan_free_pages(page, order, init);
> +       return static_branch_unlikely(&deferred_pages) ||
> +              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> +               (fpi_flags & FPI_SKIP_KASAN_POISON));
>  }
>
>  /* Returns true if the struct page for the pfn is uninitialised */
> @@ -453,13 +449,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
>         return false;
>  }
>  #else
> -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> -                                               bool init, fpi_t fpi_flags)
> +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
>  {
> -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> -               return;
> -       kasan_free_pages(page, order, init);
> +       return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> +               (fpi_flags & FPI_SKIP_KASAN_POISON));
>  }
>
>  static inline bool early_page_uninitialised(unsigned long pfn)
> @@ -1245,7 +1238,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
>                         unsigned int order, bool check_free, fpi_t fpi_flags)
>  {
>         int bad = 0;
> -       bool init;
> +       bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
>
>         VM_BUG_ON_PAGE(PageTail(page), page);
>
> @@ -1314,10 +1307,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
>          * With hardware tag-based KASAN, memory tags must be set before the
>          * page becomes unavailable via debug_pagealloc or arch_free_page.
>          */
> -       init = want_init_on_free();
> -       if (init && !kasan_has_integrated_init())
> -               kernel_init_free_pages(page, 1 << order);
> -       kasan_free_nondeferred_pages(page, order, init, fpi_flags);
> +       if (kasan_has_integrated_init()) {

Is it guaranteed that this branch will be eliminated when
kasan_has_integrated_init() is static inline returning false? I know
this works with macros, but I don't remember seeing cases with static
inline functions. I guess it's the same, but mentioning just in case
because BUILD_BUG() stood out.

> +               if (!skip_kasan_poison)
> +                       kasan_free_pages(page, order);
> +       } else {
> +               bool init = want_init_on_free();
> +
> +               if (init)
> +                       kernel_init_free_pages(page, 1 << order);
> +               if (!skip_kasan_poison)
> +                       kasan_poison_pages(page, order, init);
> +       }
>
>         /*
>          * arch_free_page() can make the page's contents inaccessible.  s390
> @@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
>  inline void post_alloc_hook(struct page *page, unsigned int order,
>                                 gfp_t gfp_flags)
>  {
> -       bool init;
> -
>         set_page_private(page, 0);
>         set_page_refcounted(page);
>
> @@ -2344,10 +2342,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
>          * kasan_alloc_pages and kernel_init_free_pages must be
>          * kept together to avoid discrepancies in behavior.
>          */
> -       init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
> -       kasan_alloc_pages(page, order, init);
> -       if (init && !kasan_has_integrated_init())
> -               kernel_init_free_pages(page, 1 << order);
> +       if (kasan_has_integrated_init()) {
> +               kasan_alloc_pages(page, order, gfp_flags);
> +       } else {
> +               bool init =
> +                       !want_init_on_free() && want_init_on_alloc(gfp_flags);
> +
> +               kasan_unpoison_pages(page, order, init);
> +               if (init)
> +                       kernel_init_free_pages(page, 1 << order);
> +       }
>
>         set_page_owner(page, order, gfp_flags);
>  }
> --
> 2.31.1.607.g51e8a6a459-goog
>
Marco Elver May 26, 2021, 10:12 a.m. UTC | #2
On Wed, May 12, 2021 at 01:09PM -0700, Peter Collingbourne wrote:
[...] 
> +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
> +void kasan_free_pages(struct page *page, unsigned int order);
> +
>  #else /* CONFIG_KASAN_HW_TAGS */
>  
>  static inline bool kasan_enabled(void)
>  {
> +#ifdef CONFIG_KASAN
>  	return true;
> +#else
> +	return false;
> +#endif
>  }

Just

	return IS_ENABLED(CONFIG_KASAN);

>  static inline bool kasan_has_integrated_init(void)
> @@ -113,8 +113,30 @@ static inline bool kasan_has_integrated_init(void)
>  	return false;
>  }
>  
> +static __always_inline void kasan_alloc_pages(struct page *page,
> +					      unsigned int order, gfp_t flags)
> +{
> +	/* Only available for integrated init. */
> +	BUILD_BUG();
> +}
> +
> +static __always_inline void kasan_free_pages(struct page *page,
> +					     unsigned int order)
> +{
> +	/* Only available for integrated init. */
> +	BUILD_BUG();
> +}

This *should* always work, as long as the compiler optimizes everything
like we expect.

But: In this case, I think this is sign that the interface design can be
improved. Can we just make kasan_{alloc,free}_pages() return a 'bool
__must_check' to indicate if kasan takes care of init?

The variants here would simply return kasan_has_integrated_init().

That way, there'd be no need for the BUILD_BUG()s and the interface
becomes harder to misuse by design.

Also, given that kasan_{alloc,free}_pages() initializes memory, this is
an opportunity to just give them a better name. Perhaps

	/* Returns true if KASAN took care of initialization, false otherwise. */
	bool __must_check kasan_alloc_pages_try_init(struct page *page, unsigned int order, gfp_t flags);
	bool __must_check kasan_free_pages_try_init(struct page *page, unsigned int order);
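
The !CONFIG_KASAN_HW_TAGS stubs would then shrink to something like this
(rough sketch of the idea only, untested):

	static __always_inline bool __must_check
	kasan_alloc_pages_try_init(struct page *page, unsigned int order, gfp_t flags)
	{
		/* No integrated init in this config: the caller does the init. */
		return kasan_has_integrated_init();	/* constant false here */
	}

	static __always_inline bool __must_check
	kasan_free_pages_try_init(struct page *page, unsigned int order)
	{
		return kasan_has_integrated_init();
	}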

[...]
> -	init = want_init_on_free();
> -	if (init && !kasan_has_integrated_init())
> -		kernel_init_free_pages(page, 1 << order);
> -	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
> +	if (kasan_has_integrated_init()) {
> +		if (!skip_kasan_poison)
> +			kasan_free_pages(page, order);

I think kasan_free_pages() could return a bool, and this would become

	if (skip_kasan_poison || !kasan_free_pages(...)) {
		...

> +	} else {
> +		bool init = want_init_on_free();
> +
> +		if (init)
> +			kernel_init_free_pages(page, 1 << order);
> +		if (!skip_kasan_poison)
> +			kasan_poison_pages(page, order, init);
> +	}
>  
>  	/*
>  	 * arch_free_page() can make the page's contents inaccessible.  s390
> @@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
>  inline void post_alloc_hook(struct page *page, unsigned int order,
>  				gfp_t gfp_flags)
>  {
> -	bool init;
> -
>  	set_page_private(page, 0);
>  	set_page_refcounted(page);
>  
> @@ -2344,10 +2342,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
>  	 * kasan_alloc_pages and kernel_init_free_pages must be
>  	 * kept together to avoid discrepancies in behavior.
>  	 */
> -	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
> -	kasan_alloc_pages(page, order, init);
> -	if (init && !kasan_has_integrated_init())
> -		kernel_init_free_pages(page, 1 << order);
> +	if (kasan_has_integrated_init()) {
> +		kasan_alloc_pages(page, order, gfp_flags);

It looks to me that kasan_alloc_pages() could return a bool, and this
would become

	if (!kasan_alloc_pages(...)) {
		...

> +	} else {
> +		bool init =
> +			!want_init_on_free() && want_init_on_alloc(gfp_flags);
> +

[ No need for line-break (for cases like this the kernel is fine with up
to 100 cols if it improves readability). ]

> +		kasan_unpoison_pages(page, order, init);
> +		if (init)
> +			kernel_init_free_pages(page, 1 << order);
> +	}

Thoughts?

Thanks,
-- Marco
Peter Collingbourne May 26, 2021, 7:27 p.m. UTC | #3
On Wed, May 26, 2021 at 3:12 AM Marco Elver <elver@google.com> wrote:
>
> On Wed, May 12, 2021 at 01:09PM -0700, Peter Collingbourne wrote:
> [...]
> > +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
> > +void kasan_free_pages(struct page *page, unsigned int order);
> > +
> >  #else /* CONFIG_KASAN_HW_TAGS */
> >
> >  static inline bool kasan_enabled(void)
> >  {
> > +#ifdef CONFIG_KASAN
> >       return true;
> > +#else
> > +     return false;
> > +#endif
> >  }
>
> Just
>
>         return IS_ENABLED(CONFIG_KASAN);

Will do.

> >  static inline bool kasan_has_integrated_init(void)
> > @@ -113,8 +113,30 @@ static inline bool kasan_has_integrated_init(void)
> >       return false;
> >  }
> >
> > +static __always_inline void kasan_alloc_pages(struct page *page,
> > +                                           unsigned int order, gfp_t flags)
> > +{
> > +     /* Only available for integrated init. */
> > +     BUILD_BUG();
> > +}
> > +
> > +static __always_inline void kasan_free_pages(struct page *page,
> > +                                          unsigned int order)
> > +{
> > +     /* Only available for integrated init. */
> > +     BUILD_BUG();
> > +}
>
> This *should* always work, as long as the compiler optimizes everything
> like we expect.

Yeah, as I mentioned to Catalin on an earlier revision I'm not a fan
of relying on the compiler optimizing this away, but it looks like
we're already relying on this elsewhere in the kernel.

> But: In this case, I think this is sign that the interface design can be
> improved. Can we just make kasan_{alloc,free}_pages() return a 'bool
> __must_check' to indicate if kasan takes care of init?

I considered a number of different approaches including something like
that before settling on the one in this patch. One consideration was
that we should avoid involving KASAN in normal execution as much as
possible, in order to make the normal code path as comprehensible as
possible. With an approach where alloc/free return a bool the reader
needs to understand what the KASAN alloc/free functions do in the
normal case. Whereas with an approach where an "accessor" function on
the KASAN side returns a bool, it's more obvious that the code has a
"normal path" and a "KASAN path", and readers who only care about the
normal path can ignore the KASAN path.

Does that make sense? I don't feel too strongly so I can change
alloc/free to return a bool if you don't agree.

> The variants here would simply return kasan_has_integrated_init().
>
> That way, there'd be no need for the BUILD_BUG()s and the interface
> becomes harder to misuse by design.
>
> Also, given that kasan_{alloc,free}_pages() initializes memory, this is
> an opportunity to just give them a better name. Perhaps
>
>         /* Returns true if KASAN took care of initialization, false otherwise. */
>         bool __must_check kasan_alloc_pages_try_init(struct page *page, unsigned int order, gfp_t flags);
>         bool __must_check kasan_free_pages_try_init(struct page *page, unsigned int order);

I considered changing the name but concluded that we probably
shouldn't try to pack too much information into the name. With a code
flow like:

if (kasan_has_integrated_init()) {
  kasan_alloc_pages();
} else {
  kernel_init_free_pages();
}

I think it's probably clear enough that kasan_alloc_pages() is doing
the stuff in kernel_init_free_pages() as well.

> [...]
> > -     init = want_init_on_free();
> > -     if (init && !kasan_has_integrated_init())
> > -             kernel_init_free_pages(page, 1 << order);
> > -     kasan_free_nondeferred_pages(page, order, init, fpi_flags);
> > +     if (kasan_has_integrated_init()) {
> > +             if (!skip_kasan_poison)
> > +                     kasan_free_pages(page, order);
>
> I think kasan_free_pages() could return a bool, and this would become
>
>         if (skip_kasan_poison || !kasan_free_pages(...)) {
>                 ...
>
> > +     } else {
> > +             bool init = want_init_on_free();
> > +
> > +             if (init)
> > +                     kernel_init_free_pages(page, 1 << order);
> > +             if (!skip_kasan_poison)
> > +                     kasan_poison_pages(page, order, init);
> > +     }
> >
> >       /*
> >        * arch_free_page() can make the page's contents inaccessible.  s390
> > @@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
> >  inline void post_alloc_hook(struct page *page, unsigned int order,
> >                               gfp_t gfp_flags)
> >  {
> > -     bool init;
> > -
> >       set_page_private(page, 0);
> >       set_page_refcounted(page);
> >
> > @@ -2344,10 +2342,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
> >        * kasan_alloc_pages and kernel_init_free_pages must be
> >        * kept together to avoid discrepancies in behavior.
> >        */
> > -     init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
> > -     kasan_alloc_pages(page, order, init);
> > -     if (init && !kasan_has_integrated_init())
> > -             kernel_init_free_pages(page, 1 << order);
> > +     if (kasan_has_integrated_init()) {
> > +             kasan_alloc_pages(page, order, gfp_flags);
>
> It looks to me that kasan_alloc_pages() could return a bool, and this
> would become
>
>         if (!kasan_alloc_pages(...)) {
>                 ...
>
> > +     } else {
> > +             bool init =
> > +                     !want_init_on_free() && want_init_on_alloc(gfp_flags);
> > +
>
> [ No need for line-break (for cases like this the kernel is fine with up
> to 100 cols if it improves readability). ]

Okay, I'll make that change.

Peter
Marco Elver May 26, 2021, 7:54 p.m. UTC | #4
On Wed, 26 May 2021 at 21:28, Peter Collingbourne <pcc@google.com> wrote:
[...]
> > >  static inline bool kasan_has_integrated_init(void)
> > > @@ -113,8 +113,30 @@ static inline bool kasan_has_integrated_init(void)
> > >       return false;
> > >  }
> > >
> > > +static __always_inline void kasan_alloc_pages(struct page *page,
> > > +                                           unsigned int order, gfp_t flags)
> > > +{
> > > +     /* Only available for integrated init. */
> > > +     BUILD_BUG();
> > > +}
> > > +
> > > +static __always_inline void kasan_free_pages(struct page *page,
> > > +                                          unsigned int order)
> > > +{
> > > +     /* Only available for integrated init. */
> > > +     BUILD_BUG();
> > > +}
> >
> > This *should* always work, as long as the compiler optimizes everything
> > like we expect.
>
> Yeah, as I mentioned to Catalin on an earlier revision I'm not a fan
> of relying on the compiler optimizing this away, but it looks like
> we're already relying on this elsewhere in the kernel.

That's true, and it's also how BUILD_BUG() works underneath (it calls
a  __attribute__((error(msg))) function guarded by a condition, or in
this case without a condition...  new code should usually use
static_assert() but that's obviously not possible here). In fact, if
the kernel is built without optimizations, BUILD_BUG() turns into
a no-op.
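
In condensed form the mechanism is roughly this (a simplified sketch,
not the kernel's actual macros):

	/*
	 * Simplified model of BUILD_BUG(): the extern function carries an
	 * error attribute, so the build only succeeds if every call to it
	 * is eliminated as dead code.
	 */
	#define MY_BUILD_BUG()						\
	do {								\
		extern void my_build_bug_call(void)			\
			__attribute__((error("BUILD_BUG failed")));	\
		my_build_bug_call();					\
	} while (0)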

And just in case, I do not mind the BUILD_BUG(), because it should always work.

> > But: In this case, I think this is sign that the interface design can be
> > improved. Can we just make kasan_{alloc,free}_pages() return a 'bool
> > __must_check' to indicate if kasan takes care of init?
>
> I considered a number of different approaches including something like
> that before settling on the one in this patch. One consideration was
> that we should avoid involving KASAN in normal execution as much as
> possible, in order to make the normal code path as comprehensible as
> possible. With an approach where alloc/free return a bool the reader
> needs to understand what the KASAN alloc/free functions do in the
> normal case. Whereas with an approach where an "accessor" function on
> the KASAN side returns a bool, it's more obvious that the code has a
> "normal path" and a "KASAN path", and readers who only care about the
> normal path can ignore the KASAN path.
>
> Does that make sense? I don't feel too strongly so I can change
> alloc/free to return a bool if you don't agree.

If this has already been considered, then that's fair. I just wanted
to point it out in case it hadn't been.

Let's leave as-is.

I also just noticed that we pass 'init' to kasan_poison_pages(...,
init) in the !kasan_has_integrated_init() case, which might be
confusing.

Thanks,
-- Marco
Peter Collingbourne May 28, 2021, 1:04 a.m. UTC | #5
On Tue, May 25, 2021 at 3:00 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
>
> On Wed, May 12, 2021 at 11:09 PM Peter Collingbourne <pcc@google.com> wrote:
> >
> > Currently with integrated init page_alloc.c needs to know whether
> > kasan_alloc_pages() will zero initialize memory, but this will start
> > becoming more complicated once we start adding tag initialization
> > support for user pages. To avoid page_alloc.c needing to know more
> > details of what integrated init will do, move the unpoisoning logic
> > for integrated init into the HW tags implementation. Currently the
> > logic is identical but it will diverge in subsequent patches.
> >
> > For symmetry do the same for poisoning although this logic will
> > be unaffected by subsequent patches.
> >
> > Signed-off-by: Peter Collingbourne <pcc@google.com>
> > Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056
> > ---
> > v3:
> > - use BUILD_BUG()
> >
> > v2:
> > - fix build with KASAN disabled
> >
> >  include/linux/kasan.h | 66 +++++++++++++++++++++++++++----------------
> >  mm/kasan/common.c     |  4 +--
> >  mm/kasan/hw_tags.c    | 14 +++++++++
> >  mm/mempool.c          |  6 ++--
> >  mm/page_alloc.c       | 56 +++++++++++++++++++-----------------
> >  5 files changed, 91 insertions(+), 55 deletions(-)
> >
> > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > index b1678a61e6a7..38061673e6ac 100644
> > --- a/include/linux/kasan.h
> > +++ b/include/linux/kasan.h
> > @@ -2,6 +2,7 @@
> >  #ifndef _LINUX_KASAN_H
> >  #define _LINUX_KASAN_H
> >
> > +#include <linux/bug.h>
> >  #include <linux/static_key.h>
> >  #include <linux/types.h>
> >
> > @@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
> >
> >  #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
> >
> > -#ifdef CONFIG_KASAN
> > -
> > -struct kasan_cache {
> > -       int alloc_meta_offset;
> > -       int free_meta_offset;
> > -       bool is_kmalloc;
> > -};
> > -
> >  #ifdef CONFIG_KASAN_HW_TAGS
> >
> >  DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
> > @@ -101,11 +94,18 @@ static inline bool kasan_has_integrated_init(void)
> >         return kasan_enabled();
> >  }
> >
> > +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
> > +void kasan_free_pages(struct page *page, unsigned int order);
> > +
> >  #else /* CONFIG_KASAN_HW_TAGS */
> >
> >  static inline bool kasan_enabled(void)
> >  {
> > +#ifdef CONFIG_KASAN
> >         return true;
> > +#else
> > +       return false;
> > +#endif
> >  }
> >
> >  static inline bool kasan_has_integrated_init(void)
> > @@ -113,8 +113,30 @@ static inline bool kasan_has_integrated_init(void)
> >         return false;
> >  }
> >
> > +static __always_inline void kasan_alloc_pages(struct page *page,
> > +                                             unsigned int order, gfp_t flags)
> > +{
> > +       /* Only available for integrated init. */
> > +       BUILD_BUG();
> > +}
> > +
> > +static __always_inline void kasan_free_pages(struct page *page,
> > +                                            unsigned int order)
> > +{
> > +       /* Only available for integrated init. */
> > +       BUILD_BUG();
> > +}
> > +
> >  #endif /* CONFIG_KASAN_HW_TAGS */
> >
> > +#ifdef CONFIG_KASAN
> > +
> > +struct kasan_cache {
> > +       int alloc_meta_offset;
> > +       int free_meta_offset;
> > +       bool is_kmalloc;
> > +};
> > +
> >  slab_flags_t __kasan_never_merge(void);
> >  static __always_inline slab_flags_t kasan_never_merge(void)
> >  {
> > @@ -130,20 +152,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
> >                 __kasan_unpoison_range(addr, size);
> >  }
> >
> > -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
> > -static __always_inline void kasan_alloc_pages(struct page *page,
> > +void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
> > +static __always_inline void kasan_poison_pages(struct page *page,
> >                                                 unsigned int order, bool init)
> >  {
> >         if (kasan_enabled())
> > -               __kasan_alloc_pages(page, order, init);
> > +               __kasan_poison_pages(page, order, init);
> >  }
> >
> > -void __kasan_free_pages(struct page *page, unsigned int order, bool init);
> > -static __always_inline void kasan_free_pages(struct page *page,
> > -                                               unsigned int order, bool init)
> > +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
> > +static __always_inline void kasan_unpoison_pages(struct page *page,
> > +                                                unsigned int order, bool init)
> >  {
> >         if (kasan_enabled())
> > -               __kasan_free_pages(page, order, init);
> > +               __kasan_unpoison_pages(page, order, init);
> >  }
> >
> >  void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> > @@ -285,21 +307,15 @@ void kasan_restore_multi_shot(bool enabled);
> >
> >  #else /* CONFIG_KASAN */
> >
> > -static inline bool kasan_enabled(void)
> > -{
> > -       return false;
> > -}
> > -static inline bool kasan_has_integrated_init(void)
> > -{
> > -       return false;
> > -}
> >  static inline slab_flags_t kasan_never_merge(void)
> >  {
> >         return 0;
> >  }
> >  static inline void kasan_unpoison_range(const void *address, size_t size) {}
> > -static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
> > -static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
> > +static inline void kasan_poison_pages(struct page *page, unsigned int order,
> > +                                     bool init) {}
> > +static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
> > +                                       bool init) {}
> >  static inline void kasan_cache_create(struct kmem_cache *cache,
> >                                       unsigned int *size,
> >                                       slab_flags_t *flags) {}
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 6bb87f2acd4e..0ecd293af344 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
> >         return 0;
> >  }
> >
> > -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
> > +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
> >  {
> >         u8 tag;
> >         unsigned long i;
> > @@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
> >         kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
> >  }
> >
> > -void __kasan_free_pages(struct page *page, unsigned int order, bool init)
> > +void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
> >  {
> >         if (likely(!PageHighMem(page)))
> >                 kasan_poison(page_address(page), PAGE_SIZE << order,
> > diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
> > index 4004388b4e4b..45e552cb9172 100644
> > --- a/mm/kasan/hw_tags.c
> > +++ b/mm/kasan/hw_tags.c
> > @@ -238,6 +238,20 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
> >         return &alloc_meta->free_track[0];
> >  }
> >
> > +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
> > +{
> > +       bool init = !want_init_on_free() && want_init_on_alloc(flags);
>
> This check is now duplicated. One check here, the same one in
> page_alloc.c. Please either add a helper that gets used in both
> places, or at least a comment that the checks must be kept in sync.

Added a comment in v4.

> > +
> > +       kasan_unpoison_pages(page, order, init);
> > +}
> > +
> > +void kasan_free_pages(struct page *page, unsigned int order)
> > +{
> > +       bool init = want_init_on_free();
>
> Same here.

Likewise.

> > +
> > +       kasan_poison_pages(page, order, init);
> > +}
> > +
> >  #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
> >
> >  void kasan_set_tagging_report_once(bool state)
> > diff --git a/mm/mempool.c b/mm/mempool.c
> > index a258cf4de575..0b8afbec3e35 100644
> > --- a/mm/mempool.c
> > +++ b/mm/mempool.c
> > @@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
> >         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> >                 kasan_slab_free_mempool(element);
> >         else if (pool->alloc == mempool_alloc_pages)
> > -               kasan_free_pages(element, (unsigned long)pool->pool_data, false);
> > +               kasan_poison_pages(element, (unsigned long)pool->pool_data,
> > +                                  false);
> >  }
> >
> >  static void kasan_unpoison_element(mempool_t *pool, void *element)
> > @@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
> >         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> >                 kasan_unpoison_range(element, __ksize(element));
> >         else if (pool->alloc == mempool_alloc_pages)
> > -               kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
> > +               kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
> > +                                    false);
> >  }
> >
> >  static __always_inline void add_element(mempool_t *pool, void *element)
> > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > index aaa1655cf682..6e82a7f6fd6f 100644
> > --- a/mm/page_alloc.c
> > +++ b/mm/page_alloc.c
> > @@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
> >  static DEFINE_STATIC_KEY_TRUE(deferred_pages);
> >
> >  /*
> > - * Calling kasan_free_pages() only after deferred memory initialization
> > + * Calling kasan_poison_pages() only after deferred memory initialization
> >   * has completed. Poisoning pages during deferred memory init will greatly
> >   * lengthen the process and cause problem in large memory systems as the
> >   * deferred pages initialization is done with interrupt disabled.
> > @@ -394,15 +394,11 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
> >   * on-demand allocation and then freed again before the deferred pages
> >   * initialization is done, but this is not likely to happen.
> >   */
> > -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> > -                                               bool init, fpi_t fpi_flags)
> > +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
> >  {
> > -       if (static_branch_unlikely(&deferred_pages))
> > -               return;
> > -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> > -               return;
> > -       kasan_free_pages(page, order, init);
> > +       return static_branch_unlikely(&deferred_pages) ||
> > +              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > +               (fpi_flags & FPI_SKIP_KASAN_POISON));
> >  }
> >
> >  /* Returns true if the struct page for the pfn is uninitialised */
> > @@ -453,13 +449,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
> >         return false;
> >  }
> >  #else
> > -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> > -                                               bool init, fpi_t fpi_flags)
> > +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
> >  {
> > -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> > -               return;
> > -       kasan_free_pages(page, order, init);
> > +       return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > +               (fpi_flags & FPI_SKIP_KASAN_POISON));
> >  }
> >
> >  static inline bool early_page_uninitialised(unsigned long pfn)
> > @@ -1245,7 +1238,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
> >                         unsigned int order, bool check_free, fpi_t fpi_flags)
> >  {
> >         int bad = 0;
> > -       bool init;
> > +       bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
> >
> >         VM_BUG_ON_PAGE(PageTail(page), page);
> >
> > @@ -1314,10 +1307,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
> >          * With hardware tag-based KASAN, memory tags must be set before the
> >          * page becomes unavailable via debug_pagealloc or arch_free_page.
> >          */
> > -       init = want_init_on_free();
> > -       if (init && !kasan_has_integrated_init())
> > -               kernel_init_free_pages(page, 1 << order);
> > -       kasan_free_nondeferred_pages(page, order, init, fpi_flags);
> > +       if (kasan_has_integrated_init()) {
>
> Is it guaranteed that this branch will be eliminated when
> kasan_has_integrated_init() is static inline returning false? I know
> this works with macros, but I don't remember seeing cases with static
> inline functions. I guess it's the same, but mentioning just in case
> because BUILD_BUG() stood out.

Here's one example of where we rely on optimization after inlining to
eliminate a BUILD_BUG():
https://github.com/torvalds/linux/blob/3224374f7eb08fbb36d3963895da20ff274b8e6a/arch/arm64/include/asm/arm_dsu_pmu.h#L123
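
A standalone illustration of the same pattern (toy example, not kernel
code):

	/*
	 * With optimizations enabled, the branch on the constant-false
	 * inline is folded away, so the error-attributed call never
	 * survives to trigger a compile error.
	 */
	static inline _Bool has_integrated_init(void) { return 0; }

	extern void must_be_dead_code(void)
		__attribute__((error("call was not eliminated")));

	void hook(void)
	{
		if (has_integrated_init())
			must_be_dead_code();
	}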

Peter

Patch

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b1678a61e6a7..38061673e6ac 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,6 +2,7 @@ 
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/bug.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
 
@@ -79,14 +80,6 @@  static inline void kasan_disable_current(void) {}
 
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-#ifdef CONFIG_KASAN
-
-struct kasan_cache {
-	int alloc_meta_offset;
-	int free_meta_offset;
-	bool is_kmalloc;
-};
-
 #ifdef CONFIG_KASAN_HW_TAGS
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,11 +94,18 @@  static inline bool kasan_has_integrated_init(void)
 	return kasan_enabled();
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
+void kasan_free_pages(struct page *page, unsigned int order);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_enabled(void)
 {
+#ifdef CONFIG_KASAN
 	return true;
+#else
+	return false;
+#endif
 }
 
 static inline bool kasan_has_integrated_init(void)
@@ -113,8 +113,30 @@  static inline bool kasan_has_integrated_init(void)
 	return false;
 }
 
+static __always_inline void kasan_alloc_pages(struct page *page,
+					      unsigned int order, gfp_t flags)
+{
+	/* Only available for integrated init. */
+	BUILD_BUG();
+}
+
+static __always_inline void kasan_free_pages(struct page *page,
+					     unsigned int order)
+{
+	/* Only available for integrated init. */
+	BUILD_BUG();
+}
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
+#ifdef CONFIG_KASAN
+
+struct kasan_cache {
+	int alloc_meta_offset;
+	int free_meta_offset;
+	bool is_kmalloc;
+};
+
 slab_flags_t __kasan_never_merge(void);
 static __always_inline slab_flags_t kasan_never_merge(void)
 {
@@ -130,20 +152,20 @@  static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
 		__kasan_unpoison_range(addr, size);
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_alloc_pages(struct page *page,
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
 						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_alloc_pages(page, order, init);
+		__kasan_poison_pages(page, order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_free_pages(struct page *page,
-						unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+						 unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_free_pages(page, order, init);
+		__kasan_unpoison_pages(page, order, init);
 }
 
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +307,15 @@  void kasan_restore_multi_shot(bool enabled);
 
 #else /* CONFIG_KASAN */
 
-static inline bool kasan_enabled(void)
-{
-	return false;
-}
-static inline bool kasan_has_integrated_init(void)
-{
-	return false;
-}
 static inline slab_flags_t kasan_never_merge(void)
 {
 	return 0;
 }
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+				      bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+					bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6bb87f2acd4e..0ecd293af344 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -97,7 +97,7 @@  slab_flags_t __kasan_never_merge(void)
 	return 0;
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
 	unsigned long i;
@@ -111,7 +111,7 @@  void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
 	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order, bool init)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 {
 	if (likely(!PageHighMem(page)))
 		kasan_poison(page_address(page), PAGE_SIZE << order,
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 4004388b4e4b..45e552cb9172 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -238,6 +238,20 @@  struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
 	return &alloc_meta->free_track[0];
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
+{
+	bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+	kasan_unpoison_pages(page, order, init);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+	bool init = want_init_on_free();
+
+	kasan_poison_pages(page, order, init);
+}
+
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_set_tagging_report_once(bool state)
diff --git a/mm/mempool.c b/mm/mempool.c
index a258cf4de575..0b8afbec3e35 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -106,7 +106,8 @@  static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_slab_free_mempool(element);
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_free_pages(element, (unsigned long)pool->pool_data, false);
+		kasan_poison_pages(element, (unsigned long)pool->pool_data,
+				   false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +115,8 @@  static void kasan_unpoison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_range(element, __ksize(element));
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
+		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+				     false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aaa1655cf682..6e82a7f6fd6f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -382,7 +382,7 @@  int page_group_by_mobility_disabled __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 
 /*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Calling kasan_poison_pages() only after deferred memory initialization
  * has completed. Poisoning pages during deferred memory init will greatly
  * lengthen the process and cause problem in large memory systems as the
  * deferred pages initialization is done with interrupt disabled.
@@ -394,15 +394,11 @@  static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * on-demand allocation and then freed again before the deferred pages
  * initialization is done, but this is not likely to happen.
  */
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
 {
-	if (static_branch_unlikely(&deferred_pages))
-		return;
-	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-			(fpi_flags & FPI_SKIP_KASAN_POISON))
-		return;
-	kasan_free_pages(page, order, init);
+	return static_branch_unlikely(&deferred_pages) ||
+	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+		(fpi_flags & FPI_SKIP_KASAN_POISON));
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -453,13 +449,10 @@  defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 #else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
 {
-	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-			(fpi_flags & FPI_SKIP_KASAN_POISON))
-		return;
-	kasan_free_pages(page, order, init);
+	return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+		(fpi_flags & FPI_SKIP_KASAN_POISON));
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1245,7 +1238,7 @@  static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, bool check_free, fpi_t fpi_flags)
 {
 	int bad = 0;
-	bool init;
+	bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1314,10 +1307,17 @@  static __always_inline bool free_pages_prepare(struct page *page,
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
-	init = want_init_on_free();
-	if (init && !kasan_has_integrated_init())
-		kernel_init_free_pages(page, 1 << order);
-	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+	if (kasan_has_integrated_init()) {
+		if (!skip_kasan_poison)
+			kasan_free_pages(page, order);
+	} else {
+		bool init = want_init_on_free();
+
+		if (init)
+			kernel_init_free_pages(page, 1 << order);
+		if (!skip_kasan_poison)
+			kasan_poison_pages(page, order, init);
+	}
 
 	/*
 	 * arch_free_page() can make the page's contents inaccessible.  s390
@@ -2324,8 +2324,6 @@  static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
-	bool init;
-
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
@@ -2344,10 +2342,16 @@  inline void post_alloc_hook(struct page *page, unsigned int order,
 	 * kasan_alloc_pages and kernel_init_free_pages must be
 	 * kept together to avoid discrepancies in behavior.
 	 */
-	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
-	kasan_alloc_pages(page, order, init);
-	if (init && !kasan_has_integrated_init())
-		kernel_init_free_pages(page, 1 << order);
+	if (kasan_has_integrated_init()) {
+		kasan_alloc_pages(page, order, gfp_flags);
+	} else {
+		bool init =
+			!want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+		kasan_unpoison_pages(page, order, init);
+		if (init)
+			kernel_init_free_pages(page, 1 << order);
+	}
 
 	set_page_owner(page, order, gfp_flags);
 }