[v2,07/19] arm64: mm: Handle LVA support as a CPU feature

Message ID 20221124123932.2648991-8-ardb@kernel.org (mailing list archive)
State New, archived
Series arm64: Enable LPA2 support for 4k and 16k pages

Commit Message

Ard Biesheuvel Nov. 24, 2022, 12:39 p.m. UTC
Currently, we detect CPU support for 52-bit virtual addressing (LVA)
extremely early, before creating the kernel page tables or enabling the
MMU. We cannot override the feature this early, and so large virtual
addressing is always enabled on CPUs that implement it, provided that
software support was enabled at build time. It also means we rely on
non-trivial code in asm to deal with this feature.

Given that both the ID map and the TTBR1 mapping of the kernel image are
guaranteed to be 48-bit addressable, it is not actually necessary to
enable support this early, and instead, we can model it as a CPU
feature. That way, we can rely on code patching to get the correct
TCR.T1SZ values programmed on secondary boot and resume from suspend.
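
As an aside, once the capability has been finalised, other C code can
test it through the regular cpufeature API. A hypothetical use (not
part of this patch) might look like:

	if (cpus_have_final_cap(ARM64_HAS_LVA))
		pr_info("52-bit virtual addressing enabled\n");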

On the primary boot path, we simply enable the MMU with 48-bit virtual
addressing initially, and update TCR.T1SZ from C code if LVA is
supported, right before creating the kernel mapping. Given that TTBR1
still points to reserved_pg_dir at this point, updating TCR.T1SZ should
be safe without the need for explicit TLB maintenance.
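
The C-side update boils down to a single masked write of TCR_EL1 (this
mirrors the map_kernel.c hunk below); with 52-bit VAs, T1SZ simply
drops from 64 - 48 = 16 to 64 - 52 = 12:

	/* only once this CPU is known to implement LVA */
	if (VA_BITS == 52 && arm64_early_this_cpu_has_lva())
		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(VA_BITS));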

Since this gets rid of all accesses to the vabits_actual variable from
asm code that occurred before TCR.T1SZ had been programmed, we no
longer need the variable at all, and we can replace it with a C
expression that derives the correct value directly from TCR.T1SZ.
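
Concretely, T1SZ occupies bits [21:16] of TCR_EL1 and encodes 64 minus
the VA size, so the replacement expression (taken verbatim from the
memory.h hunk below) is simply

	#define vabits_actual	(64 - ((read_tcr() >> 16) & 63))

which evaluates to 52 once T1SZ has been programmed to 12, and to 48
(VA_BITS_MIN) before that, or on CPUs without LVA.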

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/memory.h   | 13 ++++++++++-
 arch/arm64/kernel/cpufeature.c    | 13 +++++++++++
 arch/arm64/kernel/head.S          | 24 +++-----------------
 arch/arm64/kernel/pi/map_kernel.c | 12 ++++++++++
 arch/arm64/kernel/sleep.S         |  3 ---
 arch/arm64/mm/mmu.c               |  5 ----
 arch/arm64/mm/proc.S              | 17 +++++++-------
 arch/arm64/tools/cpucaps          |  1 +
 8 files changed, 49 insertions(+), 39 deletions(-)

Comments

Ryan Roberts Nov. 28, 2022, 2:54 p.m. UTC | #1
Hi Ard,

On 24/11/2022 12:39, Ard Biesheuvel wrote:
> Currently, we detect CPU support for 52-bit virtual addressing (LVA)
> extremely early, before creating the kernel page tables or enabling the
> MMU. We cannot override the feature this early, and so large virtual
> addressing is always enabled on CPUs that implement support for it if
> the software support for it was enabled at build time. It also means we
> rely on non-trivial code in asm to deal with this feature.
> 
> Given that both the ID map and the TTBR1 mapping of the kernel image are
> guaranteed to be 48-bit addressable, it is not actually necessary to
> enable support this early, and instead, we can model it as a CPU
> feature. That way, we can rely on code patching to get the correct
> TCR.T1SZ values programmed on secondary boot and suspend from resume.

nit: I think you mean "resume from suspend"?

> 
> On the primary boot path, we simply enable the MMU with 48-bit virtual
> addressing initially, and update TCR.T1SZ if LVA is supported from C
> code, right before creating the kernel mapping. Given that TTBR1 still
> points to reserved_pg_dir at this point, updating TCR.T1SZ should be
> safe without the need for explicit TLB maintenance.
> 
> Since this gets rid of all accesses to the vabits_actual variable from
> asm code that occurred before TCR.T1SZ had been programmed, we no longer
> have a need for this variable, and we can replace it with a C expression
> that produces the correct value directly, based on the value of TCR.T1SZ.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  arch/arm64/include/asm/memory.h   | 13 ++++++++++-
>  arch/arm64/kernel/cpufeature.c    | 13 +++++++++++
>  arch/arm64/kernel/head.S          | 24 +++-----------------
>  arch/arm64/kernel/pi/map_kernel.c | 12 ++++++++++
>  arch/arm64/kernel/sleep.S         |  3 ---
>  arch/arm64/mm/mmu.c               |  5 ----
>  arch/arm64/mm/proc.S              | 17 +++++++-------
>  arch/arm64/tools/cpucaps          |  1 +
>  8 files changed, 49 insertions(+), 39 deletions(-)
> 
> [...]
Thanks,
Ryan

Patch

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a4e1d832a15a..b3826ff2e52b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -183,9 +183,20 @@ 
 #include <asm/boot.h>
 #include <asm/bug.h>
 #include <asm/sections.h>
+#include <asm/sysreg.h>
+
+static inline u64 __pure read_tcr(void)
+{
+	u64 tcr;
+
+	// read_sysreg() uses asm volatile, so avoid it here
+	asm("mrs %0, tcr_el1" : "=r"(tcr));
+	return tcr;
+}
 
 #if VA_BITS > 48
-extern u64			vabits_actual;
+// For reasons of #include hell, we can't use TCR_T1SZ_OFFSET/TCR_T1SZ_MASK here
+#define vabits_actual		(64 - ((read_tcr() >> 16) & 63))
 #else
 #define vabits_actual		((u64)VA_BITS)
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index eca9df123a8b..b44aece5024c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2654,6 +2654,19 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.cpu_enable = cpu_trap_el0_impdef,
 	},
+#ifdef CONFIG_ARM64_VA_BITS_52
+	{
+		.desc = "52-bit Virtual Addressing (LVA)",
+		.capability = ARM64_HAS_LVA,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_width = 4,
+		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
+		.matches = has_cpuid_feature,
+		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
+	},
+#endif
 	{},
 };
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a37525a5ee34..d423ff78474e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -80,7 +80,6 @@ 
 	 *  x20        primary_entry() .. __primary_switch()    CPU boot mode
 	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
 	 *  x22        create_idmap() .. start_kernel()         ID map VA of the DT blob
-	 *  x25        primary_entry() .. start_kernel()        supported VA size
 	 *  x28        create_idmap()                           callee preserved temp register
 	 */
 SYM_CODE_START(primary_entry)
@@ -95,14 +94,6 @@  SYM_CODE_START(primary_entry)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-#if VA_BITS > 48
-	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	tst	x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
-	mov	x0, #VA_BITS
-	mov	x25, #VA_BITS_MIN
-	csel	x25, x25, x0, eq
-	mov	x0, x25
-#endif
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 SYM_CODE_END(primary_entry)
@@ -402,11 +393,6 @@  SYM_FUNC_START_LOCAL(__primary_switched)
 	mov	x0, x20
 	bl	set_cpu_boot_mode_flag
 
-#if VA_BITS > 48
-	adr_l	x8, vabits_actual		// Set this early so KASAN early init
-	str	x25, [x8]			// ... observes the correct value
-	dc	civac, x8			// Make visible to booting secondaries
-#endif
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	bl	kasan_early_init
 #endif
@@ -521,9 +507,6 @@  SYM_FUNC_START_LOCAL(secondary_startup)
 	mov	x20, x0				// preserve boot mode
 	bl	finalise_el2
 	bl	__cpu_secondary_check52bitva
-#if VA_BITS > 48
-	ldr_l	x0, vabits_actual
-#endif
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	adrp	x2, idmap_pg_dir
@@ -624,10 +607,9 @@  SYM_FUNC_END(__enable_mmu)
 
 SYM_FUNC_START(__cpu_secondary_check52bitva)
 #if VA_BITS > 48
-	ldr_l	x0, vabits_actual
-	cmp	x0, #52
-	b.ne	2f
-
+alternative_if_not ARM64_HAS_LVA
+	ret
+alternative_else_nop_endif
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
 	and	x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
 	cbnz	x0, 2f
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 36537d1b837b..7dd6daee0ffd 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -122,6 +122,15 @@  static bool __init arm64_early_this_cpu_has_e0pd(void)
 						    ID_AA64MMFR2_EL1_E0PD_SHIFT);
 }
 
+static bool __init arm64_early_this_cpu_has_lva(void)
+{
+	u64 mmfr2;
+
+	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+	return cpuid_feature_extract_unsigned_field(mmfr2,
+						    ID_AA64MMFR2_EL1_VARange_SHIFT);
+}
+
 static bool __init arm64_early_this_cpu_has_pac(void)
 {
 	u64 isar1, isar2;
@@ -284,6 +293,9 @@  asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
 			arm64_use_ng_mappings = true;
 	}
 
+	if (VA_BITS == 52 && arm64_early_this_cpu_has_lva())
+		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(VA_BITS));
+
 	va_base = KIMAGE_VADDR + kaslr_offset;
 	map_kernel(kaslr_offset, va_base - pa_base, root_level);
 }
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 97c9de57725d..617f78ad43a1 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,9 +101,6 @@  SYM_FUNC_END(__cpu_suspend_enter)
 SYM_CODE_START(cpu_resume)
 	bl	init_kernel_el
 	bl	finalise_el2
-#if VA_BITS > 48
-	ldr_l	x0, vabits_actual
-#endif
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d083ac6a0764..b0c702e5bf66 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,11 +45,6 @@ 
 
 int idmap_t0sz __ro_after_init;
 
-#if VA_BITS > 48
-u64 vabits_actual __ro_after_init = VA_BITS_MIN;
-EXPORT_SYMBOL(vabits_actual);
-#endif
-
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index ae2caab15ac8..e14648ca820b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -400,8 +400,6 @@  SYM_FUNC_END(idmap_kpti_install_ng_mappings)
  *
  *	Initialise the processor for turning the MMU on.
  *
- * Input:
- *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
  * Output:
  *	Return in x0 the value of the SCTLR_EL1 register.
  */
@@ -426,20 +424,21 @@  SYM_FUNC_START(__cpu_setup)
 	mair	.req	x17
 	tcr	.req	x16
 	mov_q	mair, MAIR_EL1_SET
-	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+	mov_q	tcr, TCR_TxSZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
 
 	tcr_clear_errata_bits tcr, x9, x5
 
-#ifdef CONFIG_ARM64_VA_BITS_52
-	sub		x9, xzr, x0
-	add		x9, x9, #64
-	tcr_set_t1sz	tcr, x9
-#else
+#if VA_BITS < 48
 	idmap_get_t0sz	x9
-#endif
 	tcr_set_t0sz	tcr, x9
+#elif VA_BITS > VA_BITS_MIN
+	mov		x9, #64 - VA_BITS
+alternative_if ARM64_HAS_LVA
+	tcr_set_t1sz	tcr, x9
+alternative_else_nop_endif
+#endif
 
 	/*
 	 * Set the IPS bits in TCR_EL1.
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index f1c0347ec31a..ec650a2cf433 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -30,6 +30,7 @@  HAS_GENERIC_AUTH_IMP_DEF
 HAS_IRQ_PRIO_MASKING
 HAS_LDAPR
 HAS_LSE_ATOMICS
+HAS_LVA
 HAS_NO_FPSIMD
 HAS_NO_HW_PREFETCH
 HAS_PAN