arm64: mm: Enable KASAN for 16k/48-bit VA configurations

Message ID: 20221117192751.1956624-1-ardb@kernel.org
State: New, archived
Series: arm64: mm: Enable KASAN for 16k/48-bit VA configurations

Commit Message

Ard Biesheuvel Nov. 17, 2022, 7:27 p.m. UTC
We never bothered with enabling KASAN on 16k page configurations with
48-bit virtual addressing, given that you could get 47 bits of VA space
with 3 levels of paging instead of 4. However, once we enable LPA2
support, 4 levels of paging are needed to access the entire 52-bit VA
space, making this configuration more relevant, as it is what you get
when you run such a kernel on hardware that does not implement LPA2.

The KASAN init code assumes that the shadow memory subhierarchy does not
share any top-level PGD entries with other users of the kernel VA space,
but given that 16k/48-bit has only two top-level entries (as this level
only translates a single bit in the VA), the init code needs some rework
to deal with this specific case.
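
As a quick back-of-the-envelope check of that arithmetic (illustrative
only; the 16k granule parameters are hard-coded here rather than taken
from the kernel headers, and this snippet is not part of the patch):

  /* VA split for a 16k translation granule with 8-byte descriptors. */
  #include <assert.h>

  int main(void)
  {
          const int page_shift = 14;                 /* 16k pages */
          const int bits_per_level = page_shift - 3; /* 2048 entries -> 11 bits */

          /* Three levels of paging resolve 14 + 3 * 11 = 47 bits of VA. */
          assert(page_shift + 3 * bits_per_level == 47);

          /*
           * With 48-bit VAs a fourth level is needed, but it only has to
           * translate 48 - 47 = 1 bit, i.e. the top level table has just
           * two entries.
           */
          assert(48 - (page_shift + 3 * bits_per_level) == 1);
          return 0;
  }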

So relax the build-time requirement that the start of the shadow memory
region is PGDIR_SIZE aligned, and if it is not, recurse one level and
clone the next-level page table as well as the top-level one. Note that
the end address of the shadow region is still guaranteed to be
PGDIR_SIZE aligned, and so the shadow region is always covered by a
single pgd_t entry, and therefore a single pud_t[] table.
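
For scale (again illustrative only, with the 4-level 16k/48-bit shifts
hard-coded; not part of the patch), the relaxed alignment amounts to the
following:

  /* Per-level block sizes for 16k pages with 48-bit VAs, 4 levels. */
  #include <assert.h>

  int main(void)
  {
          const unsigned long long pud_size   = 1ULL << 36; /* 64 GiB  */
          const unsigned long long pgdir_size = 1ULL << 47; /* 128 TiB */

          /*
           * Aligning the start of the shadow region to PUD_SIZE rather
           * than PGDIR_SIZE weakens the constraint by a factor of 2048,
           * the number of pud_t entries in one 16k table.
           */
          assert(pgdir_size / pud_size == 2048);
          return 0;
  }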

Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/Kconfig         |  2 +-
 arch/arm64/mm/kasan_init.c | 34 ++++++++++++++++++--
 2 files changed, 32 insertions(+), 4 deletions(-)

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 505c8a1ccbe0cd04..7a3f109bfd69dec9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -153,7 +153,7 @@  config ARM64
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
-	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+	select HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index e969e68de005fd2a..2c1869102dfc7beb 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -170,8 +170,8 @@  asmlinkage void __init kasan_early_init(void)
 {
 	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
 		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
-	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
-	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PUD_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PUD_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
 			   true);
@@ -211,6 +211,15 @@  static void __init clear_pgds(unsigned long start,
 		set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
+static void __init clear_puds(unsigned long start, unsigned long end)
+{
+	pgd_t *pgd = pgd_offset_k(start);
+	pud_t *pud = pud_offset_kimg(p4d_offset(pgd, start), start);
+
+	for (; start < end; start += PUD_SIZE)
+		set_pud(pud++, __pud(0));
+}
+
 static void __init kasan_init_shadow(void)
 {
 	u64 kimg_shadow_start, kimg_shadow_end;
@@ -235,10 +244,29 @@  static void __init kasan_init_shadow(void)
 	 * setup will be finished.
 	 */
 	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
+	if (KASAN_SHADOW_START % PGDIR_SIZE) {
+		/*
+		 * We need some special handling when the shadow memory range
+		 * shares a pgd_t entry with the linear region. This can only
+		 * happen when running with 16k pages/48-bit VAs, in which case
+		 * the root level table has only two entries.
+		 */
+		static pud_t pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
+		u64 addr = KASAN_SHADOW_START & PGDIR_MASK;
+		pgd_t *pgdp = pgd_offset_k(addr);
+		pud_t *pudp = pud_offset_kimg(p4d_offset(pgdp, addr), addr);
+
+		memcpy(pud, pudp, sizeof(pud));
+		tmp_pg_dir[0] = __pgd(__phys_to_pgd_val(__pa_symbol(pud)) |
+				      PUD_TYPE_TABLE);
+	}
 	dsb(ishst);
 	cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);
 
-	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+	if (KASAN_SHADOW_START % PGDIR_SIZE)
+		clear_puds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+	else
+		clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
 			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));