
[03/15] kasan: Vmalloc dense tag-based mode support

Message ID a8cfb5d8d93ba48fd5f2defcccac5d758ecd7f39.1738686764.git.maciej.wieczor-retman@intel.com (mailing list archive)
State New
Series kasan: x86: arm64: risc-v: KASAN tag-based mode for x86

Commit Message

Maciej Wieczor-Retman Feb. 4, 2025, 5:33 p.m. UTC
To use KASAN with the vmalloc allocator, multiple functions are
implemented that deal with full pages of memory. Many of these
functions are hardcoded to handle byte-aligned shadow memory regions
by using __memset().

With the introduction of the dense mode, tags won't necessarily occupy
whole bytes of shadow memory: if the allocated memory isn't aligned to
32 bytes, which is the amount of memory covered by one shadow byte, a
tag can end up occupying only half of a shadow byte.
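
As a rough userspace illustration of the problem (not kernel code; the
constants and the helper are made up for this sketch, assuming 16-byte
tag granules packed two per shadow byte as described above), a range
that isn't 32-byte aligned starts or ends in the middle of a shadow
byte:

#include <stdbool.h>
#include <stdio.h>

#define GRANULE_SIZE           16 /* assumed: bytes covered by one 4-bit tag */
#define MEMORY_PER_SHADOW_BYTE 32 /* assumed: two tags packed per shadow byte */

/*
 * True when poisoning [addr, addr + size) begins or ends in the middle
 * of a shadow byte, in which case a byte-wise __memset() of the shadow
 * region would also clobber the neighbouring granule's tag.
 */
static bool poison_range_is_unaligned(unsigned long addr, unsigned long size)
{
	return (addr % MEMORY_PER_SHADOW_BYTE) != 0 ||
	       ((addr + size) % MEMORY_PER_SHADOW_BYTE) != 0;
}

int main(void)
{
	/* 32-byte aligned range: its tags fill whole shadow bytes. */
	printf("%d\n", poison_range_is_unaligned(0x40, 64)); /* prints 0 */
	/* Only 16-byte aligned: the first tag fills half a shadow byte. */
	printf("%d\n", poison_range_is_unaligned(0x50, 64)); /* prints 1 */
	return 0;
}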

Change the __memset() calls to kasan_poison(). With the dense tag-based
mode enabled, kasan_poison() takes care of any unaligned tags in shadow
memory.
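
For reference, below is a simplified userspace sketch of the per-nibble
update that a dense-aware kasan_poison() has to perform and that a
byte-wise __memset() cannot express. It is not the kernel
implementation (the real dense-mode kasan_poison() is introduced
elsewhere in this series); the constants, the shadow array, and the
even/odd nibble assignment are assumptions:

#include <stddef.h>
#include <stdint.h>

#define GRANULE_SIZE           16 /* assumed: bytes covered by one 4-bit tag */
#define MEMORY_PER_SHADOW_BYTE 32 /* assumed: two tags packed per shadow byte */

static uint8_t shadow[1024]; /* stand-in for the real shadow region */

/* Assumes addr and size are granule-aligned, as KASAN requires. */
static void dense_poison(unsigned long addr, size_t size, uint8_t tag)
{
	unsigned long granule = addr / GRANULE_SIZE;
	unsigned long end = (addr + size) / GRANULE_SIZE;

	for (; granule < end; granule++) {
		uint8_t *byte = &shadow[granule / 2];

		if (granule & 1) /* odd granule: high nibble (assumed layout) */
			*byte = (*byte & 0x0f) | (uint8_t)(tag << 4);
		else             /* even granule: low nibble (assumed layout) */
			*byte = (*byte & 0xf0) | (tag & 0x0f);
	}
}

Poisoning a range that starts on a 16-byte (but not 32-byte) boundary
then only rewrites one nibble of the first shadow byte and leaves the
neighbouring granule's tag intact, which is exactly what a whole-byte
__memset() cannot do.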

Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
---
 mm/kasan/kasan.h  |  2 +-
 mm/kasan/shadow.c | 14 ++++++--------
 2 files changed, 7 insertions(+), 9 deletions(-)

Patch

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d29bd0e65020..a56aadd51485 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -135,7 +135,7 @@  static inline bool kasan_requires_meta(void)
 
 #define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)
 
-#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
+#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)
 
 #ifdef CONFIG_KASAN_GENERIC
 #define KASAN_PAGE_FREE		0xFF  /* freed page */
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 368503f54b87..94f51046e6ae 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -332,7 +332,7 @@  static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	if (!page)
 		return -ENOMEM;
 
-	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
+	kasan_poison((void *)page, PAGE_SIZE, KASAN_VMALLOC_INVALID, false);
 	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
 
 	spin_lock(&init_mm.page_table_lock);
@@ -357,9 +357,6 @@  int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
 
-	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
-	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
-
 	/*
 	 * User Mode Linux maps enough shadow memory for all of virtual memory
 	 * at boot, so doesn't need to allocate more on vmalloc, just clear it.
@@ -368,12 +365,12 @@  int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	 * reason.
 	 */
 	if (IS_ENABLED(CONFIG_UML)) {
-		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
+		kasan_poison((void *)addr, size, KASAN_VMALLOC_INVALID, false);
 		return 0;
 	}
 
-	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
-	shadow_end = PAGE_ALIGN(shadow_end);
+	shadow_start = PAGE_ALIGN_DOWN((unsigned long)kasan_mem_to_shadow((void *)addr));
+	shadow_end = PAGE_ALIGN((unsigned long)kasan_mem_to_shadow((void *)addr + size));
 
 	ret = apply_to_page_range(&init_mm, shadow_start,
 				  shadow_end - shadow_start,
@@ -546,7 +543,8 @@  void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	if (shadow_end > shadow_start) {
 		size = shadow_end - shadow_start;
 		if (IS_ENABLED(CONFIG_UML)) {
-			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
+			kasan_poison((void *)region_start, region_end - region_start,
+				     KASAN_VMALLOC_INVALID, false);
 			return;
 		}
 		apply_to_existing_page_range(&init_mm,