[05/11] PageSlab: eliminate unnecessary compound_head() calls in mm/kasan

Message ID 20211012180148.1669685-6-hannes@cmpxchg.org (mailing list archive)
State New
Series PageSlab: eliminate unnecessary compound_head() calls

Commit Message

Johannes Weiner Oct. 12, 2021, 6:01 p.m. UTC
All pages tested here come from virt_to_head_page(), either directly or
through kasan_addr_to_page(), so the extra compound_head() calls on them
are redundant. Remove them, and rename the helper to
kasan_addr_to_head_page() to make the head-page guarantee explicit.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/kasan/common.c      | 4 ++--
 mm/kasan/generic.c     | 4 ++--
 mm/kasan/kasan.h       | 2 +-
 mm/kasan/report.c      | 6 +++---
 mm/kasan/report_tags.c | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)
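
The reasoning is that virt_to_head_page() already resolves tail pages to
their head page, so calling compound_head() on its result is a no-op.
Below is a minimal userspace sketch of that idempotence; it is not kernel
code, and the toy struct page layout and field names are made up purely
for illustration:

/* Toy model (userspace, illustrative only) of why the extra
 * compound_head() is redundant after virt_to_head_page(). */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page {
	struct page *head;	/* NULL for a head page, else its head */
	bool slab;		/* stands in for the PageSlab() flag */
};

/* Simplified compound_head(): tail pages map to their head,
 * head pages map to themselves. */
static struct page *compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

/* Simplified virt_to_head_page(): already returns the head page. */
static struct page *virt_to_head_page(struct page *any_page)
{
	return compound_head(any_page);
}

int main(void)
{
	struct page head = { .head = NULL,  .slab = true  };
	struct page tail = { .head = &head, .slab = false };

	struct page *page = virt_to_head_page(&tail);

	/* page is already the head; a second lookup changes nothing,
	 * so PageSlab(compound_head(page)) == PageSlab(page). */
	assert(compound_head(page) == page);
	printf("PageSlab(page) = %d\n", page->slab);
	return 0;
}

The same holds for pages obtained via kasan_addr_to_page(), which simply
returns virt_to_head_page(addr) for valid addresses (see the report.c hunk
below); hence the rename to kasan_addr_to_head_page().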
Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index b5e81273fc6b..2baf121fb8c5 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -411,7 +411,7 @@  void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(compound_head(page)))) {
+	if (unlikely(!PageSlab(page))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
 		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
@@ -575,7 +575,7 @@  void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	page = virt_to_head_page(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(compound_head(page))))
+	if (unlikely(!PageSlab(page)))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
 		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 94c0c86c79d9..d4303a6722ab 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -330,12 +330,12 @@  DEFINE_ASAN_SET_SHADOW(f8);
 
 void kasan_record_aux_stack(void *addr)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct page *page = kasan_addr_to_head_page(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(compound_head(page))))
+	if (is_kfence_address(addr) || !(page && PageSlab(page)))
 		return;
 
 	cache = page->slab_cache;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8bf568a80eb8..fe39eeee6b59 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -249,7 +249,7 @@  bool kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
-struct page *kasan_addr_to_page(const void *addr);
+struct page *kasan_addr_to_head_page(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 7cdcf968f43f..405ecf3c9301 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -151,7 +151,7 @@  static void print_track(struct kasan_track *track, const char *prefix)
 	}
 }
 
-struct page *kasan_addr_to_page(const void *addr)
+struct page *kasan_addr_to_head_page(const void *addr)
 {
 	if ((addr >= (void *)PAGE_OFFSET) &&
 			(addr < high_memory))
@@ -251,12 +251,12 @@  static inline bool init_task_stack_addr(const void *addr)
 
 static void print_address_description(void *addr, u8 tag)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct page *page = kasan_addr_to_head_page(addr);
 
 	dump_stack_lvl(KERN_ERR);
 	pr_err("\n");
 
-	if (page && PageSlab(compound_head(page))) {
+	if (page && PageSlab(page)) {
 		struct kmem_cache *cache = page->slab_cache;
 		void *object = nearest_obj(cache, page,	addr);
 
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index 32f955d98e76..5ae9df06ed44 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -20,8 +20,8 @@  const char *kasan_get_bug_type(struct kasan_access_info *info)
 
 	tag = get_tag(info->access_addr);
 	addr = kasan_reset_tag(info->access_addr);
-	page = kasan_addr_to_page(addr);
-	if (page && PageSlab(compound_head(page))) {
+	page = kasan_addr_to_head_page(addr);
+	if (page && PageSlab(page)) {
 		cache = page->slab_cache;
 		object = nearest_obj(cache, page, (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);