diff --git a/mm/kasan/common.c b/mm/kasan/common.c
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -411,7 +411,7 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(compound_head(page)))) {
+	if (unlikely(!PageSlab(page))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
 		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
@@ -575,7 +575,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	page = virt_to_head_page(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(compound_head(page))))
+	if (unlikely(!PageSlab(page)))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
 		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -330,12 +330,12 @@ DEFINE_ASAN_SET_SHADOW(f8);
 
 void kasan_record_aux_stack(void *addr)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct page *page = kasan_addr_to_head_page(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(compound_head(page))))
+	if (is_kfence_address(addr) || !(page && PageSlab(page)))
 		return;
 
 	cache = page->slab_cache;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -249,7 +249,7 @@ bool kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
-struct page *kasan_addr_to_page(const void *addr);
+struct page *kasan_addr_to_head_page(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -151,7 +151,7 @@ static void print_track(struct kasan_track *track, const char *prefix)
 	}
 }
 
-struct page *kasan_addr_to_page(const void *addr)
+struct page *kasan_addr_to_head_page(const void *addr)
 {
 	if ((addr >= (void *)PAGE_OFFSET) &&
 			(addr < high_memory))
@@ -251,12 +251,12 @@ static inline bool init_task_stack_addr(const void *addr)
 
 static void print_address_description(void *addr, u8 tag)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct page *page = kasan_addr_to_head_page(addr);
 
 	dump_stack_lvl(KERN_ERR);
 	pr_err("\n");
 
-	if (page && PageSlab(compound_head(page))) {
+	if (page && PageSlab(page)) {
 		struct kmem_cache *cache = page->slab_cache;
 		void *object = nearest_obj(cache, page, addr);
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -20,8 +20,8 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 
 	tag = get_tag(info->access_addr);
 	addr = kasan_reset_tag(info->access_addr);
-	page = kasan_addr_to_page(addr);
-	if (page && PageSlab(compound_head(page))) {
+	page = kasan_addr_to_head_page(addr);
+	if (page && PageSlab(page)) {
 		cache = page->slab_cache;
 		object = nearest_obj(cache, page, (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);

All tested pages come from virt_to_head_page(): either directly or
through kasan_addr_to_page(). Remove the redundant compound_head()
calls. Rename the helper to kasan_addr_to_head_page() to clarify.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/kasan/common.c      | 4 ++--
 mm/kasan/generic.c     | 4 ++--
 mm/kasan/kasan.h       | 2 +-
 mm/kasan/report.c      | 6 +++---
 mm/kasan/report_tags.c | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)
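
For review context (not part of the patch): the redundancy follows from
how virt_to_head_page() is defined. A minimal sketch of the pre-folio
helper from include/linux/mm.h, paraphrased here from memory:

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);	/* may be a tail page */

	return compound_head(page);		/* resolve to the head page */
}

compound_head() is idempotent: applied to a head page it returns that
same page. So for any page obtained via virt_to_head_page(), whether
directly or through kasan_addr_to_page(), PageSlab(compound_head(page))
and PageSlab(page) are equivalent, which is why the patch can drop the
inner call at every test site.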