@@ -279,7 +279,8 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
* based on objects indexes, so that objects that are next to each other
* get different tags.
*/
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
+static inline u8 assign_tag(struct kmem_cache *cache,
+ const void *object, bool init)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return 0xff;
@@ -321,8 +322,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
return (void *)object;
}
-static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
- unsigned long ip, bool quarantine)
+static inline bool ____kasan_slab_free(struct kmem_cache *cache,
+ void *object, unsigned long ip, bool quarantine)
{
u8 tag;
void *tagged_object;
@@ -366,7 +367,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
return ____kasan_slab_free(cache, object, ip, true);
}
-static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
if (ptr != page_address(virt_to_head_page(ptr))) {
kasan_report_invalid_free(ptr, ip);
@@ -461,8 +462,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
return tagged_object;
}
-static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
- size_t size, gfp_t flags)
+static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+ const void *object, size_t size, gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
Mark all static functions in common.c and kasan.h that are used for hardware tag-based KASAN as inline to avoid unnecessary function calls. Signed-off-by: Andrey Konovalov <andreyknvl@google.com> --- mm/kasan/common.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-)