
[v3,mm,08/13] kasan, mm: optimize krealloc poisoning

Message ID 9bef90327c9cb109d736c40115684fd32f49e6b0.1612546384.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series kasan: optimizations and fixes for HW_TAGS

Commit Message

Andrey Konovalov Feb. 5, 2021, 5:34 p.m. UTC
Currently, krealloc() always calls ksize(), which unpoisons the whole
object including the redzone. This is inefficient, as kasan_krealloc()
repoisons the redzone for objects that fit into the same buffer.

This patch changes krealloc() instrumentation to use uninstrumented
__ksize() that doesn't unpoison the memory. Instead, kasan_krealloc()
is changed to unpoison the memory excluding the redzone.

For objects that don't fit into the old allocation, this patch disables
KASAN accessibility checks when copying memory into a new object instead
of unpoisoning it.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/common.c | 12 ++++++++++--
 mm/slab_common.c  | 20 ++++++++++++++------
 2 files changed, 24 insertions(+), 8 deletions(-)
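
[Editor's note: a minimal sketch of the behavior this change targets, assuming a regular slab-backed kmalloc() allocation; the redzone granularity is simplified and the before/after comments only restate the commit message, they are not part of the patch.]

	void *p = kmalloc(100, GFP_KERNEL);	/* ~100 bytes unpoisoned, rest of the object is redzone */

	p = krealloc(p, 40, GFP_KERNEL);
	/*
	 * Before: krealloc() called ksize(), which unpoisoned the whole object
	 * including the redzone, and kasan_krealloc() then had to repoison it.
	 * After: __ksize()/kfence_ksize() leave the poisoning untouched;
	 * kasan_krealloc() unpoisons only bytes [0, 40) and piggy-backs on the
	 * kmalloc() poisoning code to mark the rest of the object as a redzone.
	 *
	 * If the new size did not fit, __do_krealloc() would instead allocate a
	 * new object and copy ks bytes with KASAN checks disabled, since that
	 * copy may read the old object's redzone.
	 */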

Comments

Marco Elver Feb. 8, 2021, 10:48 a.m. UTC | #1
On Fri, Feb 05, 2021 at 06:34PM +0100, Andrey Konovalov wrote:
> Currently, krealloc() always calls ksize(), which unpoisons the whole
> object including the redzone. This is inefficient, as kasan_krealloc()
> repoisons the redzone for objects that fit into the same buffer.
> 
> This patch changes krealloc() instrumentation to use uninstrumented
> __ksize() that doesn't unpoison the memory. Instead, kasan_krealloc()
> is changed to unpoison the memory excluding the redzone.
> 
> For objects that don't fit into the old allocation, this patch disables
> KASAN accessibility checks when copying memory into a new object instead
> of unpoisoning it.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

Reviewed-by: Marco Elver <elver@google.com>

Clarification below.

> ---
>  mm/kasan/common.c | 12 ++++++++++--
>  mm/slab_common.c  | 20 ++++++++++++++------
>  2 files changed, 24 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 7ea643f7e69c..a8a67dca5e55 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
>  
>  	/*
>  	 * The object has already been unpoisoned by kasan_slab_alloc() for
> -	 * kmalloc() or by ksize() for krealloc().
> +	 * kmalloc() or by kasan_krealloc() for krealloc().
>  	 */
>  
>  	/*
> @@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
>  
>  	/*
>  	 * The object has already been unpoisoned by kasan_alloc_pages() for
> -	 * alloc_pages() or by ksize() for krealloc().
> +	 * alloc_pages() or by kasan_krealloc() for krealloc().
>  	 */
>  
>  	/*
> @@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
>  	if (unlikely(object == ZERO_SIZE_PTR))
>  		return (void *)object;
>  
> +	/*
> +	 * Unpoison the object's data.
> +	 * Part of it might already have been unpoisoned, but it's unknown
> +	 * how big that part is.
> +	 */
> +	kasan_unpoison(object, size);
> +
>  	page = virt_to_head_page(object);
>  
> +	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
>  	if (unlikely(!PageSlab(page)))
>  		return __kasan_kmalloc_large(object, size, flags);
>  	else
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index dad70239b54c..60a2f49df6ce 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1140,19 +1140,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
>  	void *ret;
>  	size_t ks;
>  
> -	if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
> -		return NULL;
> -
> -	ks = ksize(p);
> +	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
> +	if (likely(!ZERO_OR_NULL_PTR(p))) {
> +		if (!kasan_check_byte(p))
> +			return NULL;

Just checking: kasan_check_byte() returns true if the object is not tracked by KASAN, right? I.e. if it's a KFENCE object, kasan_check_byte() always returns true.

> +		ks = kfence_ksize(p) ?: __ksize(p);
> +	} else
> +		ks = 0;
>  
> +	/* If the object still fits, repoison it precisely. */
>  	if (ks >= new_size) {
>  		p = kasan_krealloc((void *)p, new_size, flags);
>  		return (void *)p;
>  	}
>  
>  	ret = kmalloc_track_caller(new_size, flags);
> -	if (ret && p)
> -		memcpy(ret, p, ks);
> +	if (ret && p) {
> +		/* Disable KASAN checks as the object's redzone is accessed. */
> +		kasan_disable_current();
> +		memcpy(ret, kasan_reset_tag(p), ks);
> +		kasan_enable_current();
> +	}
>  
>  	return ret;
>  }
> -- 
> 2.30.0.365.g02bc693789-goog
>
Andrey Konovalov Feb. 9, 2021, 1:24 p.m. UTC | #2
On Mon, Feb 8, 2021 at 11:48 AM Marco Elver <elver@google.com> wrote:
>
> On Fri, Feb 05, 2021 at 06:34PM +0100, Andrey Konovalov wrote:
> > Currently, krealloc() always calls ksize(), which unpoisons the whole
> > object including the redzone. This is inefficient, as kasan_krealloc()
> > repoisons the redzone for objects that fit into the same buffer.
> >
> > This patch changes krealloc() instrumentation to use uninstrumented
> > __ksize() that doesn't unpoison the memory. Instead, kasan_krealloc()
> > is changed to unpoison the memory excluding the redzone.
> >
> > For objects that don't fit into the old allocation, this patch disables
> > KASAN accessibility checks when copying memory into a new object instead
> > of unpoisoning it.
> >
> > Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>
> Reviewed-by: Marco Elver <elver@google.com>
>
> Clarification below.
>
> > ---
> >  mm/kasan/common.c | 12 ++++++++++--
> >  mm/slab_common.c  | 20 ++++++++++++++------
> >  2 files changed, 24 insertions(+), 8 deletions(-)
> >
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 7ea643f7e69c..a8a67dca5e55 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
> >
> >       /*
> >        * The object has already been unpoisoned by kasan_slab_alloc() for
> > -      * kmalloc() or by ksize() for krealloc().
> > +      * kmalloc() or by kasan_krealloc() for krealloc().
> >        */
> >
> >       /*
> > @@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
> >
> >       /*
> >        * The object has already been unpoisoned by kasan_alloc_pages() for
> > -      * alloc_pages() or by ksize() for krealloc().
> > +      * alloc_pages() or by kasan_krealloc() for krealloc().
> >        */
> >
> >       /*
> > @@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
> >       if (unlikely(object == ZERO_SIZE_PTR))
> >               return (void *)object;
> >
> > +     /*
> > +      * Unpoison the object's data.
> > +      * Part of it might already have been unpoisoned, but it's unknown
> > +      * how big that part is.
> > +      */
> > +     kasan_unpoison(object, size);
> > +
> >       page = virt_to_head_page(object);
> >
> > +     /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
> >       if (unlikely(!PageSlab(page)))
> >               return __kasan_kmalloc_large(object, size, flags);
> >       else
> > diff --git a/mm/slab_common.c b/mm/slab_common.c
> > index dad70239b54c..60a2f49df6ce 100644
> > --- a/mm/slab_common.c
> > +++ b/mm/slab_common.c
> > @@ -1140,19 +1140,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
> >       void *ret;
> >       size_t ks;
> >
> > -     if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
> > -             return NULL;
> > -
> > -     ks = ksize(p);
> > +     /* Don't use instrumented ksize to allow precise KASAN poisoning. */
> > +     if (likely(!ZERO_OR_NULL_PTR(p))) {
> > +             if (!kasan_check_byte(p))
> > +                     return NULL;
>
> Just checking: kasan_check_byte() returns true if the object is not tracked by KASAN, right? I.e. if it's a KFENCE object, kasan_check_byte() always returns true.

kasan_check_byte() still performs the check, but since KFENCE objects
are never poisoned, the check always passes.

Thanks!
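
[Editor's note: a small illustration of the semantics confirmed above; krealloc_object_is_live() is a made-up name used only for this sketch, not a real kernel function.]

	/*
	 * Illustration only: kasan_check_byte() reports a KASAN error and
	 * returns false only when the byte at p is poisoned. KFENCE objects
	 * are never poisoned by KASAN, so the check is still performed for
	 * them but always passes, and __do_krealloc() then obtains their
	 * size via kfence_ksize(p).
	 */
	static bool krealloc_object_is_live(const void *p)
	{
		return kasan_check_byte(p);
	}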

Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7ea643f7e69c..a8a67dca5e55 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -476,7 +476,7 @@  static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 
 	/*
 	 * The object has already been unpoisoned by kasan_slab_alloc() for
-	 * kmalloc() or by ksize() for krealloc().
+	 * kmalloc() or by kasan_krealloc() for krealloc().
 	 */
 
 	/*
@@ -526,7 +526,7 @@  void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 	/*
 	 * The object has already been unpoisoned by kasan_alloc_pages() for
-	 * alloc_pages() or by ksize() for krealloc().
+	 * alloc_pages() or by kasan_krealloc() for krealloc().
 	 */
 
 	/*
@@ -554,8 +554,16 @@  void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
+	/*
+	 * Unpoison the object's data.
+	 * Part of it might already have been unpoisoned, but it's unknown
+	 * how big that part is.
+	 */
+	kasan_unpoison(object, size);
+
 	page = virt_to_head_page(object);
 
+	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!PageSlab(page)))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
diff --git a/mm/slab_common.c b/mm/slab_common.c
index dad70239b54c..60a2f49df6ce 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1140,19 +1140,27 @@  static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	void *ret;
 	size_t ks;
 
-	if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
-		return NULL;
-
-	ks = ksize(p);
+	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
+	if (likely(!ZERO_OR_NULL_PTR(p))) {
+		if (!kasan_check_byte(p))
+			return NULL;
+		ks = kfence_ksize(p) ?: __ksize(p);
+	} else
+		ks = 0;
 
+	/* If the object still fits, repoison it precisely. */
 	if (ks >= new_size) {
 		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
 	ret = kmalloc_track_caller(new_size, flags);
-	if (ret && p)
-		memcpy(ret, p, ks);
+	if (ret && p) {
+		/* Disable KASAN checks as the object's redzone is accessed. */
+		kasan_disable_current();
+		memcpy(ret, kasan_reset_tag(p), ks);
+		kasan_enable_current();
+	}
 
 	return ret;
 }