| Message ID | 20220624150651.1358849-3-ardb@kernel.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | arm64: refactor boot flow |
On Fri, Jun 24, 2022 at 05:06:32PM +0200, Ard Biesheuvel wrote:
> Currently, we only support 52-bit virtual addressing on 64k pages
> configurations, and in all other cases, vabits_actual is guaranteed to
> equal VA_BITS (== VA_BITS_MIN). So get rid of the variable entirely in
> that case.
> 
> While at it, move the assignment out of the asm entry code - it has no
> need to be there.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

I see the patch itself checks VA_BITS rather than PAGE_SIZE (and the
former is the right thing to do for FEAT_LPA2), so FWIW:

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> ---
>  arch/arm64/include/asm/memory.h |  4 ++++
>  arch/arm64/kernel/head.S        | 15 +--------------
>  arch/arm64/mm/init.c            | 15 ++++++++++++++-
>  arch/arm64/mm/mmu.c             |  4 +++-
>  4 files changed, 22 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
> index 0af70d9abede..c751cd9b94f8 100644
> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -174,7 +174,11 @@
>  #include <linux/types.h>
>  #include <asm/bug.h>
>  
> +#if VA_BITS > 48
>  extern u64 vabits_actual;
> +#else
> +#define vabits_actual	((u64)VA_BITS)
> +#endif
>  
>  extern s64 memstart_addr;
>  /* PHYS_OFFSET - the physical address of the start of memory. */
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 1cdecce552bb..dc07858eb673 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -293,19 +293,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  	adrp	x0, idmap_pg_dir
>  	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
>  
> -#ifdef CONFIG_ARM64_VA_BITS_52
> -	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
> -	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
> -	mov	x5, #52
> -	cbnz	x6, 1f
> -#endif
> -	mov	x5, #VA_BITS_MIN
> -1:
> -	adr_l	x6, vabits_actual
> -	str	x5, [x6]
> -	dmb	sy
> -	dc	ivac, x6		// Invalidate potentially stale cache line
> -
>  	/*
>  	 * VA_BITS may be too small to allow for an ID mapping to be created
>  	 * that covers system RAM if that is located sufficiently high in the
> @@ -713,7 +700,7 @@ SYM_FUNC_START(__enable_mmu)
>  SYM_FUNC_END(__enable_mmu)
>  
>  SYM_FUNC_START(__cpu_secondary_check52bitva)
> -#ifdef CONFIG_ARM64_VA_BITS_52
> +#if VA_BITS > 48
>  	ldr_l	x0, vabits_actual
>  	cmp	x0, #52
>  	b.ne	2f
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 339ee84e5a61..1faa6760895e 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -265,7 +265,20 @@ early_param("mem", early_mem);
>  
>  void __init arm64_memblock_init(void)
>  {
> -	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
> +	s64 linear_region_size;
> +
> +#if VA_BITS > 48
> +	if (cpuid_feature_extract_unsigned_field(
> +				read_sysreg_s(SYS_ID_AA64MMFR2_EL1),
> +				ID_AA64MMFR2_LVA_SHIFT))
> +		vabits_actual = VA_BITS;
> +
> +	/* make the variable visible to secondaries with the MMU off */
> +	dcache_clean_inval_poc((u64)&vabits_actual,
> +			       (u64)&vabits_actual + sizeof(vabits_actual));
> +#endif
> +
> +	linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
>  
>  	/*
>  	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 7148928e3932..a6392656d589 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -46,8 +46,10 @@
>  u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
>  u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
>  
> -u64 __section(".mmuoff.data.write") vabits_actual;
> +#if VA_BITS > 48
> +u64 vabits_actual __ro_after_init = VA_BITS_MIN;
>  EXPORT_SYMBOL(vabits_actual);
> +#endif
>  
>  u64 kimage_vaddr __ro_after_init = (u64)&_text;
>  EXPORT_SYMBOL(kimage_vaddr);
> -- 
> 2.35.1
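To see what the patch buys, here is a minimal user-space C sketch of the before/after behaviour of vabits_actual. It is an illustration only, not kernel code: VA_BITS, VA_BITS_MIN, and the field position used by ID_AA64MMFR2_LVA_SHIFT (bits [19:16] of ID_AA64MMFR2_EL1) are reproduced from the patch context, while varange_field() and the fake mmfr2 value are hypothetical stand-ins for the real feature-register plumbing.

```c
/*
 * Stand-alone illustration (not kernel code) of how vabits_actual
 * behaves after this patch. The values mimic a 52-bit VA Kconfig;
 * on a <= 48-bit config the #else branch turns the symbol into a
 * plain compile-time constant and the variable disappears entirely.
 */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS		52	/* e.g. CONFIG_ARM64_VA_BITS=52 */
#define VA_BITS_MIN	48	/* fallback when FEAT_LVA is absent */

#if VA_BITS > 48
/* Runtime-selected, as in arch/arm64/mm/mmu.c after the patch. */
static uint64_t vabits_actual = VA_BITS_MIN;
#else
/* Build-time constant, as in asm/memory.h after the patch. */
#define vabits_actual ((uint64_t)VA_BITS)
#endif

/*
 * Hypothetical stand-in for cpuid_feature_extract_unsigned_field():
 * ID_AA64MMFR2_EL1.VARange lives in bits [19:16], i.e. shift 16.
 */
static unsigned int varange_field(uint64_t mmfr2)
{
	return (mmfr2 >> 16) & 0xf;
}

int main(void)
{
#if VA_BITS > 48
	/* Pretend the CPU advertises FEAT_LVA, as the boot-CPU check does. */
	uint64_t fake_mmfr2 = 1ULL << 16;

	if (varange_field(fake_mmfr2))
		vabits_actual = VA_BITS;
#endif
	printf("vabits_actual = %llu\n", (unsigned long long)vabits_actual);
	return 0;
}
```

The point of the #else branch is that on 48-bit (and smaller) configurations every user of vabits_actual now folds to a constant at compile time; the runtime assignment, and the dcache_clean_inval_poc() needed to make the value visible to secondaries booting with the MMU off, is only paid on VA_BITS > 48 kernels.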