--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -303,9 +303,10 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
  * mempool).
  *
  * This function unpoisons a slab allocation that was previously poisoned via
- * kasan_mempool_poison_object() without initializing its memory. For the
- * tag-based modes, this function does not assign a new tag to the allocation
- * and instead restores the original tags based on the pointer value.
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
  *
  * This function operates on all slab allocations including large kmalloc
  * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
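For reference, the intended callers of this hook pair are mempool-style allocators. Below is a minimal sketch of the unpoison side, loosely modeled on the element hooks in mm/mempool.c; the wrapper name and the callback checks are illustrative assumptions, not part of this patch:

	static void kasan_unpoison_element(mempool_t *pool, void *element)
	{
		/* For kmalloc() pools, pool_data holds the element size. */
		if (pool->alloc == mempool_kmalloc)
			kasan_mempool_unpoison_object(element,
						      (size_t)pool->pool_data);
		/* For slab pools, pool_data is the kmem_cache. */
		else if (pool->alloc == mempool_alloc_slab)
			kasan_mempool_unpoison_object(element,
						      kmem_cache_size(pool->pool_data));
	}

With this patch, that unpoison call also records a fresh alloc stack trace for the element, so later reports point at the most recent handout from the pool rather than at the original allocation site.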
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -277,6 +277,20 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 	/* The object will be poisoned by kasan_poison_pages(). */
 }
 
+static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
+					gfp_t flags, bool init)
+{
+	/*
+	 * Unpoison the whole object. For kmalloc() allocations,
+	 * poison_kmalloc_redzone() will do precise poisoning.
+	 */
+	kasan_unpoison(object, cache->object_size, init);
+
+	/* Save alloc info (if possible) for non-kmalloc() allocations. */
+	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
+		kasan_save_alloc_info(cache, object, flags);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 					void *object, gfp_t flags, bool init)
 {
@@ -299,15 +313,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	tag = assign_tag(cache, object, false);
 	tagged_object = set_tag(object, tag);
 
-	/*
-	 * Unpoison the whole object.
-	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
-	 */
-	kasan_unpoison(tagged_object, cache->object_size, init);
-
-	/* Save alloc info (if possible) for non-kmalloc() allocations. */
-	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
-		kasan_save_alloc_info(cache, tagged_object, flags);
+	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+	unpoison_slab_object(cache, tagged_object, flags, init);
 
 	return tagged_object;
 }
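To make the helper's division of labor with poison_kmalloc_redzone() concrete: for, say, a 45-byte kmalloc() object served from the kmalloc-64 cache under the generic mode (8-byte granules), the net effect is roughly the following. The internal helpers named here are the ones in mm/kasan, but the sizes and arithmetic are an illustrative sketch:

	/* unpoison_slab_object(): the whole 64-byte object becomes accessible. */
	kasan_unpoison(object, cache->object_size, init);	/* 64 bytes */

	/* poison_kmalloc_redzone() then trims access back to 45 bytes: */
	kasan_poison_last_granule(object, 45);	/* bytes 45..47, byte-precise */
	kasan_poison(object + 48, 64 - 48, KASAN_SLAB_REDZONE, false);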
@@ -482,7 +489,30 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
 {
-	kasan_unpoison(ptr, size, false);
+	struct slab *slab;
+	gfp_t flags = 0; /* Might be executing under a lock. */
+
+	if (is_kfence_address(kasan_reset_tag(ptr)))
+		return;
+
+	slab = virt_to_slab(ptr);
+
+	/*
+	 * This function can be called for large kmalloc allocations that get
+	 * their memory from page_alloc.
+	 */
+	if (unlikely(!slab)) {
+		kasan_unpoison(ptr, size, false);
+		poison_kmalloc_large_redzone(ptr, size, flags);
+		return;
+	}
+
+	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+	unpoison_slab_object(slab->slab_cache, ptr, flags, false);
+
+	/* Poison the redzone and save alloc info for kmalloc() allocations. */
+	if (is_kmalloc_cache(slab->slab_cache))
+		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
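One way to exercise the new mempool path end to end is a KUnit check in the spirit of mm/kasan/kasan_test.c. In the sketch below, the test name and scaffolding are assumptions: KUNIT_EXPECT_KASAN_FAIL is that file's local macro, and mempool_alloc_preallocated() is used to deterministically take an element from the pool. The expectation is that the resulting report carries the alloc stack trace saved by __kasan_mempool_unpoison_object():

	static void mempool_kmalloc_uaf_sketch(struct kunit *test)
	{
		mempool_t pool;
		char *elem;

		KUNIT_ASSERT_EQ(test, 0,
				mempool_init_kmalloc_pool(&pool, 1, 128));

		/* Take the preallocated element: unpoisoned, alloc stack saved. */
		elem = mempool_alloc_preallocated(&pool);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

		/* Return it to the pool: the element gets poisoned. */
		mempool_free(elem, &pool);

		/* Touching it now must produce a use-after-free report. */
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)elem)[0]);

		mempool_exit(&pool);
	}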