[11/12] kasan: always inline HW_TAGS helper functions

Message ID 05a45017b4cb15344395650e880bbab0fe6ba3e4.1612208222.git.andreyknvl@google.com
State New, archived
Series kasan: optimizations and fixes for HW_TAGS

Commit Message

Andrey Konovalov Feb. 1, 2021, 7:43 p.m. UTC
Mark all static functions in common.c and kasan.h that are used for
hardware tag-based KASAN as __always_inline to avoid unnecessary
function calls.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/common.c | 13 +++++++------
 mm/kasan/kasan.h  |  6 +++---
 2 files changed, 10 insertions(+), 9 deletions(-)
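
For background: plain inline is only a hint that the compiler's cost
model may override, while __always_inline forces the expansion. In the
kernel it is defined (in include/linux/compiler_types.h) essentially as:

    #define __always_inline inline __attribute__((__always_inline__))

so a marked function is expanded at every call site, trading code size
for the guarantee that no out-of-line call is emitted.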

Comments

Marco Elver Feb. 3, 2021, 3:51 p.m. UTC | #1
On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
> Mark all static functions in common.c and kasan.h that are used for
> hardware tag-based KASAN as __always_inline to avoid unnecessary
> function calls.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

Does objtool complain about any of these?

I'm not sure this is unconditionally a good idea. If there isn't a
quantifiable performance bug or case where we cannot call a function,
perhaps we can just let the compiler decide?

More comments below.

> ---
>  mm/kasan/common.c | 13 +++++++------
>  mm/kasan/kasan.h  |  6 +++---
>  2 files changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 5691cca69397..2004ecd6e43c 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -279,7 +279,8 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
>   *    based on objects indexes, so that objects that are next to each other
>   *    get different tags.
>   */
> -static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
> +static __always_inline u8 assign_tag(struct kmem_cache *cache,
> +					const void *object, bool init)

This function might be small enough that it's fine.

>  {
>  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
>  		return 0xff;
> @@ -321,8 +322,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
>  	return (void *)object;
>  }
>  
> -static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
> -			      unsigned long ip, bool quarantine)
> +static __always_inline bool ____kasan_slab_free(struct kmem_cache *cache,
> +				void *object, unsigned long ip, bool quarantine)
>  {

Because ____kasan_slab_free() is tail-called by __kasan_slab_free() and
__kasan_slab_free_mempool(), there should never be a call (and if there
is we need to figure out why). The additional code-bloat and I-cache
pressure might be worse vs. just a jump. I'd let the compiler decide.
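
As a user-space sketch of the sibling-call point (hypothetical names,
not kernel code): when a function ends in a tail call, optimizing
compilers typically emit a plain jump instead of call/ret, so no extra
stack frame is set up even without inlining:

    #include <stdbool.h>

    bool helper(int x);     /* defined in another translation unit */

    bool caller(int x)
    {
            /*
             * Tail position: at -O2 on x86-64 this usually becomes
             * "jmp helper" (a sibling call), reusing caller's frame.
             */
            return helper(x);
    }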

>  	u8 tag;
>  	void *tagged_object;
> @@ -366,7 +367,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
>  	return ____kasan_slab_free(cache, object, ip, true);
>  }
>  
> -static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
> +static __always_inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
>  {

This one is tail-called by __kasan_kfree_large(). The usage in
__kasan_slab_free_mempool() is in an unlikely branch.
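
For context on the unlikely() point: the kernel's unlikely(x) expands
to __builtin_expect(!!(x), 0), which lets the compiler lay the branch
body out as a cold block away from the hot path, so an out-of-line call
there is cheap on the common path. A minimal sketch with hypothetical
names:

    #include <stdbool.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)  /* as in the kernel */

    bool report_invalid(void *ptr);  /* cold-path helper, defined elsewhere */

    bool free_checked(void *ptr, void *page_addr)
    {
            /* Cold branch: usually placed after the hot fall-through. */
            if (unlikely(ptr != page_addr))
                    return report_invalid(ptr);
            return true;  /* hot path: straight-line code */
    }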

>  	if (ptr != page_address(virt_to_head_page(ptr))) {
>  		kasan_report_invalid_free(ptr, ip);
> @@ -461,8 +462,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
>  	return tagged_object;
>  }
>  
> -static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
> -					size_t size, gfp_t flags)
> +static __always_inline void *____kasan_kmalloc(struct kmem_cache *cache,
> +				const void *object, size_t size, gfp_t flags)
>  {

Also only tail-called.

>  	unsigned long redzone_start;
>  	unsigned long redzone_end;
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 2f7400a3412f..d5fe72747a53 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -321,7 +321,7 @@ static inline u8 kasan_random_tag(void) { return 0; }
>  
>  #ifdef CONFIG_KASAN_HW_TAGS
>  
> -static inline void kasan_poison(const void *addr, size_t size, u8 value)
> +static __always_inline void kasan_poison(const void *addr, size_t size, u8 value)
>  {
>  	addr = kasan_reset_tag(addr);
>  
> @@ -337,7 +337,7 @@ static inline void kasan_poison(const void *addr, size_t size, u8 value)
>  	hw_set_mem_tag_range((void *)addr, size, value);
>  }
>  
> -static inline void kasan_unpoison(const void *addr, size_t size)
> +static __always_inline void kasan_unpoison(const void *addr, size_t size)
>  {

Not sure about these 2. They should be small, but it's hard to say what
is ideal on which architecture.
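
For scale: on arm64 with MTE, the real work is in
hw_set_mem_tag_range(), which walks the range in 16-byte tag granules;
inlining the thin kasan_poison()/kasan_unpoison() wrappers only removes
one call/return pair around that loop. Roughly this shape (illustrative
names, not the arm64 implementation):

    #include <stddef.h>
    #include <stdint.h>

    void set_tag_range(void *addr, size_t size, uint8_t tag);  /* per-granule loop */

    /*
     * Thin wrapper: forcing this inline saves the call overhead but not
     * the loop inside set_tag_range(), which dominates for larger sizes.
     */
    static inline void poison(void *addr, size_t size, uint8_t tag)
    {
            set_tag_range(addr, size, tag);
    }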

>  	u8 tag = get_tag(addr);
>  
> @@ -354,7 +354,7 @@ static inline void kasan_unpoison(const void *addr, size_t size)
>  	hw_set_mem_tag_range((void *)addr, size, tag);
>  }
>  
> -static inline bool kasan_byte_accessible(const void *addr)
> +static __always_inline bool kasan_byte_accessible(const void *addr)

This function feels like a macro, and if the compiler uninlined it, we
could argue that's a bug. But I'm not sure we need the __always_inline,
unless you've seen it uninlined.

>  {
>  	u8 ptr_tag = get_tag(addr);
>  	u8 mem_tag = hw_get_mem_tag((void *)addr);
> -- 
> 2.30.0.365.g02bc693789-goog
>

Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 5691cca69397..2004ecd6e43c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -279,7 +279,8 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
  *    based on objects indexes, so that objects that are next to each other
  *    get different tags.
  */
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
+static __always_inline u8 assign_tag(struct kmem_cache *cache,
+					const void *object, bool init)
 {
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
 		return 0xff;
@@ -321,8 +322,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
 	return (void *)object;
 }
 
-static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
-			      unsigned long ip, bool quarantine)
+static __always_inline bool ____kasan_slab_free(struct kmem_cache *cache,
+				void *object, unsigned long ip, bool quarantine)
 {
 	u8 tag;
 	void *tagged_object;
@@ -366,7 +367,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return ____kasan_slab_free(cache, object, ip, true);
 }
 
-static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static __always_inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip);
@@ -461,8 +462,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	return tagged_object;
 }
 
-static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
-					size_t size, gfp_t flags)
+static __always_inline void *____kasan_kmalloc(struct kmem_cache *cache,
+				const void *object, size_t size, gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 2f7400a3412f..d5fe72747a53 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -321,7 +321,7 @@ static inline u8 kasan_random_tag(void) { return 0; }
 
 #ifdef CONFIG_KASAN_HW_TAGS
 
-static inline void kasan_poison(const void *addr, size_t size, u8 value)
+static __always_inline void kasan_poison(const void *addr, size_t size, u8 value)
 {
 	addr = kasan_reset_tag(addr);
 
@@ -337,7 +337,7 @@ static inline void kasan_poison(const void *addr, size_t size, u8 value)
 	hw_set_mem_tag_range((void *)addr, size, value);
 }
 
-static inline void kasan_unpoison(const void *addr, size_t size)
+static __always_inline void kasan_unpoison(const void *addr, size_t size)
 {
 	u8 tag = get_tag(addr);
 
@@ -354,7 +354,7 @@ static inline void kasan_unpoison(const void *addr, size_t size)
 	hw_set_mem_tag_range((void *)addr, size, tag);
 }
 
-static inline bool kasan_byte_accessible(const void *addr)
+static __always_inline bool kasan_byte_accessible(const void *addr)
 {
 	u8 ptr_tag = get_tag(addr);
 	u8 mem_tag = hw_get_mem_tag((void *)addr);