[v3,11/15] kasan: move _RET_IP_ to inline wrappers

Message ID: 03fae8b66a7f4b85abadc80a2d216ac4db815444.1610652890.git.andreyknvl@google.com (mailing list archive)
State: New, archived
Series: kasan: HW_TAGS tests support and fixes

Commit Message

Andrey Konovalov Jan. 14, 2021, 7:36 p.m. UTC
Generic mm functions pass _RET_IP_ as an argument to the KASAN
annotations that might report a bug. This allows KASAN to include the
name of the function that called the mm function in the report header.

Now that KASAN has inline wrappers for all of its annotations, move
_RET_IP_ to those wrappers to simplify annotation call sites.

Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 include/linux/kasan.h | 20 +++++++++-----------
 mm/mempool.c          |  2 +-
 mm/slab.c             |  2 +-
 mm/slub.c             |  4 ++--
 4 files changed, 13 insertions(+), 15 deletions(-)
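
This works because _RET_IP_ expands to __builtin_return_address(0),
which, inside a function marked __always_inline, evaluates in the frame
of the outermost non-inlined caller. Moving it from the annotation call
sites into the always-inline wrappers therefore yields the same value.
The minimal userspace sketch below illustrates this; wrapper_ret_ip()
and mm_function() are hypothetical stand-ins, and the two macros merely
mirror the kernel's definitions:

#include <stdio.h>

/* Stand-ins mirroring the kernel's definitions. */
#define _RET_IP_ ((unsigned long)__builtin_return_address(0))
#define __always_inline inline __attribute__((__always_inline__))

/*
 * Plays the role of a kasan_*() inline wrapper: forced inline, so
 * _RET_IP_ evaluates in the frame of whatever function it is
 * inlined into.
 */
static __always_inline unsigned long wrapper_ret_ip(void)
{
	return _RET_IP_;
}

/* Plays the role of an mm function such as kfree_hook(). */
static void mm_function(void)
{
	/*
	 * Both expressions evaluate in the same frame and therefore
	 * print the same address, identifying this function's caller.
	 */
	printf("call site: 0x%lx\n", _RET_IP_);
	printf("wrapper:   0x%lx\n", wrapper_ret_ip());
}

int main(void)
{
	mm_function();
	return 0;
}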

Comments

Marco Elver Jan. 15, 2021, 1:19 p.m. UTC | #1
On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> Generic mm functions pass _RET_IP_ as an argument to the KASAN
> annotations that might report a bug. This allows KASAN to include the
> name of the function that called the mm function in the report header.
> 
> Now that KASAN has inline wrappers for all of its annotations, move
> _RET_IP_ to those wrappers to simplify annotation call sites.
> 
> Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

Much nicer!

Reviewed-by: Marco Elver <elver@google.com>

> [...]
Alexander Potapenko Jan. 15, 2021, 2:07 p.m. UTC | #2
On Fri, Jan 15, 2021 at 2:19 PM Marco Elver <elver@google.com> wrote:
>
> On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> > Generic mm functions pass _RET_IP_ as an argument to the KASAN
> > annotations that might report a bug. This allows KASAN to include the
> > name of the function that called the mm function in the report header.
> >
> > Now that KASAN has inline wrappers for all of its annotations, move
> > _RET_IP_ to those wrappers to simplify annotation call sites.
> >
> > Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
> > Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>
> Much nicer!
>
> Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>

Patch

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5e0655fb2a6f..bba1637827c3 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -181,19 +181,18 @@ static __always_inline void * __must_check kasan_init_slab_obj(
 }
 
 bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-						unsigned long ip)
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
 	if (kasan_enabled())
-		return __kasan_slab_free(s, object, ip);
+		return __kasan_slab_free(s, object, _RET_IP_);
 	return false;
 }
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static __always_inline void kasan_slab_free_mempool(void *ptr)
 {
 	if (kasan_enabled())
-		__kasan_slab_free_mempool(ptr, ip);
+		__kasan_slab_free_mempool(ptr, _RET_IP_);
 }
 
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -237,10 +236,10 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 }
 
 void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+static __always_inline void kasan_kfree_large(void *ptr)
 {
 	if (kasan_enabled())
-		__kasan_kfree_large(ptr, ip);
+		__kasan_kfree_large(ptr, _RET_IP_);
 }
 
 bool kasan_save_enable_multi_shot(void);
@@ -273,12 +272,11 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
 {
 	return (void *)object;
 }
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-				   unsigned long ip)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
 	return false;
 }
-static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				   gfp_t flags)
 {
@@ -298,7 +296,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}
 
 #endif /* CONFIG_KASAN */
 
diff --git a/mm/mempool.c b/mm/mempool.c
index 624ed51b060f..79959fac27d7 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_slab_free_mempool(element, _RET_IP_);
+		kasan_slab_free_mempool(element);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
diff --git a/mm/slab.c b/mm/slab.c
index d7c8da9319c7..afeb6191fb1e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3421,7 +3421,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 		memset(objp, 0, cachep->object_size);
 
 	/* Put the object into the quarantine, don't touch it for now. */
-	if (kasan_slab_free(cachep, objp, _RET_IP_))
+	if (kasan_slab_free(cachep, objp))
 		return;
 
 	/* Use KCSAN to help debug racy use-after-free. */
diff --git a/mm/slub.c b/mm/slub.c
index 75fb097d990d..0afb53488238 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1514,7 +1514,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 static __always_inline void kfree_hook(void *x)
 {
 	kmemleak_free(x);
-	kasan_kfree_large(x, _RET_IP_);
+	kasan_kfree_large(x);
 }
 
 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
@@ -1544,7 +1544,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
 	/* KASAN might put x into memory quarantine, delaying its reuse */
-	return kasan_slab_free(s, x, _RET_IP_);
+	return kasan_slab_free(s, x);
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,