
[RFC,v2,16/21] kasan: optimize poisoning in kmalloc and krealloc

Message ID ce573435398f21d3e604f104c29ba65eca70d9e7.1603372719.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series kasan: hardware tag-based mode for production use on arm64

Commit Message

Andrey Konovalov Oct. 22, 2020, 1:19 p.m. UTC
Since kasan_kmalloc() always follows kasan_slab_alloc(), there's no need
to unpoison the object data again; only the redzone needs to be poisoned.

This requires changing the KASAN annotation for the early SLUB cache to
kasan_slab_alloc(). Otherwise, kasan_kmalloc() doesn't untag the object.
This doesn't introduce any functional changes, as kmem_cache_node->object_size
is equal to sizeof(struct kmem_cache_node).

Similarly for kasan_krealloc(): since it's called after ksize(), which
has already unpoisoned the object, there's no need to do it again.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Link: https://linux-review.googlesource.com/id/I4083d3b55605f70fef79bca9b90843c4390296f2
---
 mm/kasan/common.c | 31 +++++++++++++++++++++----------
 mm/slub.c         |  3 +--
 2 files changed, 22 insertions(+), 12 deletions(-)
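
For context, here is a condensed sketch of the two call paths this optimization
relies on. This is illustrative only, based on the SLUB and slab_common code
around this series; exact hook placement may differ between kernel versions:

/*
 * kmalloc() slow path (SLUB):
 *
 *   __kmalloc(size, flags)
 *     -> slab_alloc(s, flags, ...)
 *          -> slab_post_alloc_hook()
 *               -> kasan_slab_alloc(s, object, flags)  // unpoisons the whole object
 *     -> kasan_kmalloc(s, object, size, flags)
 *          -> ____kasan_kmalloc(..., keep_tag = true)  // with this patch: skips the
 *                                                      // second unpoison and only
 *                                                      // poisons the redzone past 'size'
 *
 * krealloc() path:
 *
 *   krealloc(p, new_size, flags)
 *     -> __do_krealloc(p, new_size, flags)
 *          -> ksize(p)                                 // unpoisons the whole allocation
 *          -> kasan_krealloc(p, new_size, flags)       // with this patch: skips the
 *                                                      // unpoison and only poisons the
 *                                                      // new redzone
 */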

Comments

Dmitry Vyukov Oct. 28, 2020, 4:55 p.m. UTC | #1
On Thu, Oct 22, 2020 at 3:20 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> Since kasan_kmalloc() always follows kasan_slab_alloc(), there's no need
> to unpoison the object data again; only the redzone needs to be poisoned.
>
> This requires changing the KASAN annotation for the early SLUB cache to
> kasan_slab_alloc(). Otherwise, kasan_kmalloc() doesn't untag the object.
> This doesn't introduce any functional changes, as kmem_cache_node->object_size
> is equal to sizeof(struct kmem_cache_node).
>
> Similarly for kasan_krealloc(): since it's called after ksize(), which
> has already unpoisoned the object, there's no need to do it again.

Have you considered doing this the other way around: make krealloc()
call __ksize() and do the unpoisoning in kasan_krealloc()?
This has the advantage of more precise poisoning, as ksize() unpoisons
the whole underlying object.

But then maybe we will need to move the first checks in ksize() into
__ksize(), as we may need them in krealloc() as well.
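
A minimal sketch of what this alternative could look like, assuming the
__do_krealloc() shape from mm/slab_common.c of that era with ksize() swapped
for __ksize() (hypothetical, not something this series implements;
kasan_krealloc() would then have to do the unpoisoning itself, and the
NULL/ZERO_SIZE_PTR checks performed by ksize() would need to move into
__ksize() or krealloc(), as noted above):

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks;

	/* __ksize() reports the allocation size without unpoisoning it */
	ks = __ksize(p);

	if (ks >= new_size) {
		/* kasan_krealloc() would unpoison [p, p + new_size) itself */
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}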





> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Link: https://linux-review.googlesource.com/id/I4083d3b55605f70fef79bca9b90843c4390296f2
> ---
>  mm/kasan/common.c | 31 +++++++++++++++++++++----------
>  mm/slub.c         |  3 +--
>  2 files changed, 22 insertions(+), 12 deletions(-)
>
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index c5ec60e1a4d2..a581937c2a44 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -360,8 +360,14 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
>         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
>                 tag = assign_tag(cache, object, false, keep_tag);
>
> -       /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
> -       kasan_unpoison_memory(set_tag(object, tag), size);
> +       /*
> +        * Don't unpoison the object when keeping the tag. Tag is kept for:
> +        * 1. krealloc(), and then the memory has already been unpoisoned via ksize();
> +        * 2. kmalloc(), and then the memory has already been unpoisoned by kasan_kmalloc().
> +        * Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS.
> +        */
> +       if (!keep_tag)
> +               kasan_unpoison_memory(set_tag(object, tag), size);
>         kasan_poison_memory((void *)redzone_start, redzone_end - redzone_start,
>                 KASAN_KMALLOC_REDZONE);
>
> @@ -384,10 +390,9 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
>  }
>  EXPORT_SYMBOL(__kasan_kmalloc);
>
> -void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
> -                                               gfp_t flags)
> +static void * __must_check ____kasan_kmalloc_large(struct page *page, const void *ptr,
> +                                               size_t size, gfp_t flags, bool realloc)
>  {
> -       struct page *page;
>         unsigned long redzone_start;
>         unsigned long redzone_end;
>
> @@ -397,18 +402,24 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
>         if (unlikely(ptr == NULL))
>                 return NULL;
>
> -       page = virt_to_page(ptr);
> -       redzone_start = round_up((unsigned long)(ptr + size),
> -                               KASAN_GRANULE_SIZE);
> +       redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
>         redzone_end = (unsigned long)ptr + page_size(page);
>
> -       kasan_unpoison_memory(ptr, size);
> +       /* ksize() in __do_krealloc() already unpoisoned the memory. */
> +       if (!realloc)
> +               kasan_unpoison_memory(ptr, size);
>         kasan_poison_memory((void *)redzone_start, redzone_end - redzone_start,
>                 KASAN_PAGE_REDZONE);
>
>         return (void *)ptr;
>  }
>
> +void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
> +                                               gfp_t flags)
> +{
> +       return ____kasan_kmalloc_large(virt_to_page(ptr), ptr, size, flags, false);
> +}
> +
>  void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
>  {
>         struct page *page;
> @@ -419,7 +430,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
>         page = virt_to_head_page(object);
>
>         if (unlikely(!PageSlab(page)))
> -               return __kasan_kmalloc_large(object, size, flags);
> +               return ____kasan_kmalloc_large(page, object, size, flags, true);
>         else
>                 return ____kasan_kmalloc(page->slab_cache, object, size,
>                                                 flags, true);
> diff --git a/mm/slub.c b/mm/slub.c
> index 1d3f2355df3b..afb035b0bf2d 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3535,8 +3535,7 @@ static void early_kmem_cache_node_alloc(int node)
>         init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
>         init_tracking(kmem_cache_node, n);
>  #endif
> -       n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
> -                     GFP_KERNEL);
> +       n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL);
>         page->freelist = get_freepointer(kmem_cache_node, n);
>         page->inuse = 1;
>         page->frozen = 0;
> --
> 2.29.0.rc1.297.gfa9743e501-goog
>
Andrey Konovalov Nov. 2, 2020, 3:17 p.m. UTC | #2
On Wed, Oct 28, 2020 at 5:55 PM Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Thu, Oct 22, 2020 at 3:20 PM Andrey Konovalov <andreyknvl@google.com> wrote:
> >
> > Since kasan_kmalloc() always follows kasan_slab_alloc(), there's no need
> > to unpoison the object data again; only the redzone needs to be poisoned.
> >
> > This requires changing the KASAN annotation for the early SLUB cache to
> > kasan_slab_alloc(). Otherwise, kasan_kmalloc() doesn't untag the object.
> > This doesn't introduce any functional changes, as kmem_cache_node->object_size
> > is equal to sizeof(struct kmem_cache_node).
> >
> > Similarly for kasan_krealloc(): since it's called after ksize(), which
> > has already unpoisoned the object, there's no need to do it again.
>
> Have you considered doing this the other way around: make krealloc()
> call __ksize() and do the unpoisoning in kasan_krealloc()?
> This has the advantage of more precise poisoning, as ksize() unpoisons
> the whole underlying object.
>
> But then maybe we will need to move the first checks in ksize() into
> __ksize(), as we may need them in krealloc() as well.

This might be a good idea. I won't implement this for the next
version, but will look into this after that. Thanks!


Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index c5ec60e1a4d2..a581937c2a44 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -360,8 +360,14 @@  static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
 		tag = assign_tag(cache, object, false, keep_tag);
 
-	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
-	kasan_unpoison_memory(set_tag(object, tag), size);
+	/*
+	 * Don't unpoison the object when keeping the tag. Tag is kept for:
+	 * 1. krealloc(), and then the memory has already been unpoisoned via ksize();
+	 * 2. kmalloc(), and then the memory has already been unpoisoned by kasan_kmalloc().
+	 * Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS.
+	 */
+	if (!keep_tag)
+		kasan_unpoison_memory(set_tag(object, tag), size);
 	kasan_poison_memory((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
 
@@ -384,10 +390,9 @@  void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
 }
 EXPORT_SYMBOL(__kasan_kmalloc);
 
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
-						gfp_t flags)
+static void * __must_check ____kasan_kmalloc_large(struct page *page, const void *ptr,
+						size_t size, gfp_t flags, bool realloc)
 {
-	struct page *page;
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
@@ -397,18 +402,24 @@  void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 	if (unlikely(ptr == NULL))
 		return NULL;
 
-	page = virt_to_page(ptr);
-	redzone_start = round_up((unsigned long)(ptr + size),
-				KASAN_GRANULE_SIZE);
+	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
 	redzone_end = (unsigned long)ptr + page_size(page);
 
-	kasan_unpoison_memory(ptr, size);
+	/* ksize() in __do_krealloc() already unpoisoned the memory. */
+	if (!realloc)
+		kasan_unpoison_memory(ptr, size);
 	kasan_poison_memory((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_PAGE_REDZONE);
 
 	return (void *)ptr;
 }
 
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+						gfp_t flags)
+{
+	return ____kasan_kmalloc_large(virt_to_page(ptr), ptr, size, flags, false);
+}
+
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
@@ -419,7 +430,7 @@  void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		return __kasan_kmalloc_large(object, size, flags);
+		return ____kasan_kmalloc_large(page, object, size, flags, true);
 	else
 		return ____kasan_kmalloc(page->slab_cache, object, size,
 						flags, true);
diff --git a/mm/slub.c b/mm/slub.c
index 1d3f2355df3b..afb035b0bf2d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3535,8 +3535,7 @@  static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
-		      GFP_KERNEL);
+	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL);
 	page->freelist = get_freepointer(kmem_cache_node, n);
 	page->inuse = 1;
 	page->frozen = 0;
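
To make the redzone arithmetic in ____kasan_kmalloc_large() concrete, here is a
small standalone sketch of the same computation. The pointer value, the 16-byte
granule (KASAN_GRANULE_SIZE under hardware tag-based KASAN), and the order-2
page size are illustrative assumptions, not values taken from the patch:

#include <stdio.h>

/* Assumed granule: 16 bytes, as with hardware tag-based KASAN */
#define GRANULE 16UL
/* Power-of-two rounding, equivalent to the kernel's round_up() for these inputs */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long ptr = 0xffff000012340000UL; /* hypothetical page-aligned kmalloc_large() result */
	unsigned long size = 10001;               /* requested allocation size */
	unsigned long page_sz = 16384;            /* page_size(page) for an order-2 allocation */

	unsigned long redzone_start = ROUND_UP(ptr + size, GRANULE);
	unsigned long redzone_end = ptr + page_sz;

	/* [ptr, redzone_start) stays accessible; [redzone_start, redzone_end) is poisoned */
	printf("accessible: %lu bytes, redzone: %lu bytes\n",
	       redzone_start - ptr, redzone_end - redzone_start);
	return 0;
}

This prints "accessible: 10016 bytes, redzone: 6368 bytes": the 10001 requested
bytes are rounded up to the next 16-byte granule, and everything from there to
the end of the backing pages is poisoned as KASAN_PAGE_REDZONE.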