[2/5] kasan, kmemleak: pass tagged pointers to kmemleak

Message ID: cd825aa4897b0fc37d3316838993881daccbe9f5.1549921721.git.andreyknvl@google.com
State: New, archived
Series: kasan: more tag based mode fixes

Commit Message

Andrey Konovalov Feb. 11, 2019, 9:59 p.m. UTC
Right now we call kmemleak hooks before assigning tags to pointers in
KASAN hooks. As a result, when an object gets allocated, kmemleak sees
a differently tagged pointer than the one it sees when the object gets
freed. Fix it by calling the KASAN hooks before the kmemleak ones.

Reported-by: Qian Cai <cai@lca.pw>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/slab.h        | 6 ++----
 mm/slab_common.c | 2 +-
 mm/slub.c        | 3 ++-
 3 files changed, 5 insertions(+), 6 deletions(-)
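
To see why the ordering matters, here is a minimal userspace sketch of
the mismatch (set_tag() is a hypothetical stand-in for the tagging that
kasan_slab_alloc() performs; it assumes a 64-bit target where, as with
arm64's Top Byte Ignore, the tag lives in the pointer's top byte):

	/* demo.c - the pointer kmemleak records must be the tagged one */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define TAG_SHIFT 56	/* tag occupies the pointer's top byte */

	/* Hypothetical stand-in for the tagging in kasan_slab_alloc(). */
	static void *set_tag(void *ptr, uint8_t tag)
	{
		return (void *)(((uintptr_t)ptr & ~((uintptr_t)0xff << TAG_SHIFT)) |
				((uintptr_t)tag << TAG_SHIFT));
	}

	int main(void)
	{
		void *obj = malloc(32);
		void *tagged = set_tag(obj, 0x2b);

		/*
		 * Old order: kmemleak recorded 'obj', but the free path
		 * later hands it 'tagged', so the lookup never matches.
		 */
		printf("recorded %p, freed %p, equal: %d\n",
		       obj, tagged, obj == tagged);
		free(obj);	/* this demo frees the untagged pointer */
		return 0;
	}

With the patched ordering, kmemleak records the already-tagged pointer,
so the alloc-side and free-side addresses agree.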

Comments

Vincenzo Frascino Feb. 12, 2019, 3:56 p.m. UTC | #1
On 11/02/2019 21:59, Andrey Konovalov wrote:
> Right now we call kmemleak hooks before assigning tags to pointers in
> KASAN hooks. As a result, when an object gets allocated, kmemleak sees
> a differently tagged pointer than the one it sees when the object gets
> freed. Fix it by calling the KASAN hooks before the kmemleak ones.
>

Nit: Could you please add comments to the code? They should prevent the
code from being refactored in the future in a way that reintroduces the
same issue.
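
For example (hypothetical wording, the exact comment is up to v2), the
loop in slab_post_alloc_hook() could say something like:

	for (i = 0; i < size; i++) {
		/*
		 * The KASAN hook must run first: it assigns the tag that
		 * kmemleak_alloc_recursive() then records, so kmemleak
		 * sees the same tagged pointer on alloc and on free.
		 */
		p[i] = kasan_slab_alloc(s, p[i], flags);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}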

> Reported-by: Qian Cai <cai@lca.pw>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---
>  mm/slab.h        | 6 ++----
>  mm/slab_common.c | 2 +-
>  mm/slub.c        | 3 ++-
>  3 files changed, 5 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 4190c24ef0e9..638ea1b25d39 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -437,11 +437,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
>  
>  	flags &= gfp_allowed_mask;
>  	for (i = 0; i < size; i++) {
> -		void *object = p[i];
> -
> -		kmemleak_alloc_recursive(object, s->object_size, 1,
> +		p[i] = kasan_slab_alloc(s, p[i], flags);
> +		kmemleak_alloc_recursive(p[i], s->object_size, 1,
>  					 s->flags, flags);
> -		p[i] = kasan_slab_alloc(s, object, flags);
>  	}
>  
>  	if (memcg_kmem_enabled())
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 81732d05e74a..fe524c8d0246 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1228,8 +1228,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
>  	flags |= __GFP_COMP;
>  	page = alloc_pages(flags, order);
>  	ret = page ? page_address(page) : NULL;
> -	kmemleak_alloc(ret, size, 1, flags);
>  	ret = kasan_kmalloc_large(ret, size, flags);
> +	kmemleak_alloc(ret, size, 1, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL(kmalloc_order);
> diff --git a/mm/slub.c b/mm/slub.c
> index 1e3d0ec4e200..4a3d7686902f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1374,8 +1374,9 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
>   */
>  static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
>  {
> +	ptr = kasan_kmalloc_large(ptr, size, flags);
>  	kmemleak_alloc(ptr, size, 1, flags);
> -	return kasan_kmalloc_large(ptr, size, flags);
> +	return ptr;
>  }
>  
>  static __always_inline void kfree_hook(void *x)
>
Andrey Konovalov Feb. 13, 2019, 1:07 p.m. UTC | #2
On Tue, Feb 12, 2019 at 4:57 PM Vincenzo Frascino
<vincenzo.frascino@arm.com> wrote:
>
> On 11/02/2019 21:59, Andrey Konovalov wrote:
> > Right now we call kmemleak hooks before assigning tags to pointers in
> > KASAN hooks. As a result, when an object gets allocated, kmemleak sees
> > a differently tagged pointer than the one it sees when the object gets
> > freed. Fix it by calling the KASAN hooks before the kmemleak ones.
> >
>
> Nit: Could you please add comments to the code? They should prevent the
> code from being refactored in the future in a way that reintroduces the
> same issue.

Sure, I'll send v2 with comments, thanks!

>
> > Reported-by: Qian Cai <cai@lca.pw>
> > Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> > ---
> >  mm/slab.h        | 6 ++----
> >  mm/slab_common.c | 2 +-
> >  mm/slub.c        | 3 ++-
> >  3 files changed, 5 insertions(+), 6 deletions(-)
> >
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 4190c24ef0e9..638ea1b25d39 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -437,11 +437,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
> >
> >       flags &= gfp_allowed_mask;
> >       for (i = 0; i < size; i++) {
> > -             void *object = p[i];
> > -
> > -             kmemleak_alloc_recursive(object, s->object_size, 1,
> > +             p[i] = kasan_slab_alloc(s, p[i], flags);
> > +             kmemleak_alloc_recursive(p[i], s->object_size, 1,
> >                                        s->flags, flags);
> > -             p[i] = kasan_slab_alloc(s, object, flags);
> >       }
> >
> >       if (memcg_kmem_enabled())
> > diff --git a/mm/slab_common.c b/mm/slab_common.c
> > index 81732d05e74a..fe524c8d0246 100644
> > --- a/mm/slab_common.c
> > +++ b/mm/slab_common.c
> > @@ -1228,8 +1228,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
> >       flags |= __GFP_COMP;
> >       page = alloc_pages(flags, order);
> >       ret = page ? page_address(page) : NULL;
> > -     kmemleak_alloc(ret, size, 1, flags);
> >       ret = kasan_kmalloc_large(ret, size, flags);
> > +     kmemleak_alloc(ret, size, 1, flags);
> >       return ret;
> >  }
> >  EXPORT_SYMBOL(kmalloc_order);
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 1e3d0ec4e200..4a3d7686902f 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -1374,8 +1374,9 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
> >   */
> >  static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
> >  {
> > +     ptr = kasan_kmalloc_large(ptr, size, flags);
> >       kmemleak_alloc(ptr, size, 1, flags);
> > -     return kasan_kmalloc_large(ptr, size, flags);
> > +     return ptr;
> >  }
> >
> >  static __always_inline void kfree_hook(void *x)
> >
>
> --
> Regards,
> Vincenzo

Patch

diff --git a/mm/slab.h b/mm/slab.h
index 4190c24ef0e9..638ea1b25d39 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 	flags &= gfp_allowed_mask;
 	for (i = 0; i < size; i++) {
-		void *object = p[i];
-
-		kmemleak_alloc_recursive(object, s->object_size, 1,
+		p[i] = kasan_slab_alloc(s, p[i], flags);
+		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
-		p[i] = kasan_slab_alloc(s, object, flags);
 	}
 
 	if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81732d05e74a..fe524c8d0246 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1228,8 +1228,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
-	kmemleak_alloc(ret, size, 1, flags);
 	ret = kasan_kmalloc_large(ret, size, flags);
+	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
diff --git a/mm/slub.c b/mm/slub.c
index 1e3d0ec4e200..4a3d7686902f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1374,8 +1374,9 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
  */
 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
+	ptr = kasan_kmalloc_large(ptr, size, flags);
 	kmemleak_alloc(ptr, size, 1, flags);
-	return kasan_kmalloc_large(ptr, size, flags);
+	return ptr;
 }
 
 static __always_inline void kfree_hook(void *x)
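
For context, the free paths already handed kmemleak the tagged pointer,
which is why only the allocation hooks needed to move. A simplified
sketch of the free-side ordering in mm/slub.c of the same era (from
memory rather than an exact quote):

	static inline bool slab_free_hook(struct kmem_cache *s, void *x)
	{
		/* 'x' still carries its KASAN tag here, so kmemleak looks
		 * the object up by its tagged address. */
		kmemleak_free_recursive(x, s->flags);
		...
		return kasan_slab_free(s, x, _RET_IP_);
	}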