@@ -424,15 +424,10 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	}
 }
 
-static void save_alloc_info(struct kmem_cache *cache, void *object,
-				gfp_t flags, bool is_kmalloc)
+static void save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	struct kasan_alloc_meta *alloc_meta;
 
-	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
-	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
-		return;
-
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (alloc_meta)
 		kasan_set_track(&alloc_meta->alloc_track, flags);
@@ -467,8 +462,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	kasan_unpoison(tagged_object, cache->object_size, init);
 
 	/* Save alloc info (if possible) for non-kmalloc() allocations. */
-	if (kasan_stack_collection_enabled())
-		save_alloc_info(cache, (void *)object, flags, false);
+	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+		save_alloc_info(cache, (void *)object, flags);
 
 	return tagged_object;
 }
@@ -513,8 +508,8 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
 	 * Save alloc info (if possible) for kmalloc() allocations.
 	 * This also rewrites the alloc info when called from kasan_krealloc().
 	 */
-	if (kasan_stack_collection_enabled())
-		save_alloc_info(cache, (void *)object, flags, true);
+	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+		save_alloc_info(cache, (void *)object, flags);
 
 	/* Keep the tag that was set by kasan_slab_alloc(). */
 	return (void *)object;
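
Net effect of the three hunks, as a reading aid: save_alloc_info() loses its
is_kmalloc parameter and becomes a plain "record the stack trace" helper, while
each caller now states the kmalloc-cache condition itself. Below is a minimal
sketch of the resulting code, reconstructed from the hunks above and not part
of the patch; surrounding code in mm/kasan/common.c is elided, and the added
comments are mine:

	static void save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
	{
		struct kasan_alloc_meta *alloc_meta;

		/* Record the allocation stack trace if this cache has alloc metadata. */
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			kasan_set_track(&alloc_meta->alloc_track, flags);
	}

	/* In __kasan_slab_alloc(): skip kmalloc caches; ____kasan_kmalloc() covers them. */
	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
		save_alloc_info(cache, (void *)object, flags);

	/* In ____kasan_kmalloc(): only kmalloc caches save (or rewrite) alloc info here. */
	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
		save_alloc_info(cache, (void *)object, flags);

The two guards are complementary, so for a given cache only one of the two
call sites saves alloc info, and the bool flag previously threaded through
save_alloc_info() goes away; the check is now visible at the call site rather
than buried in the helper.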