@@ -507,6 +507,14 @@ alternative_endif
load_ttbr1 \page_table, \tmp, \tmp2
.endm
+ .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
+ adrp \tmp1, reserved_pg_dir
+ load_ttbr1 \tmp1, \tmp1, \tmp2
+ tlbi vmalle1
+ dsb nsh
+ isb
+ .endm
+
/*
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
*/
@@ -87,7 +87,7 @@
* x22 create_idmap() .. start_kernel() ID map VA of the DT blob
* x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset
* x24 __primary_switch() linear map KASLR seed
- * x28 create_idmap() callee preserved temp register
+ * x28 create_idmap(), remap_kernel_text() callee preserved temp register
*/
SYM_CODE_START(primary_entry)
bl preserve_boot_args
@@ -380,6 +380,66 @@ SYM_FUNC_START_LOCAL(create_kernel_mapping)
ret
SYM_FUNC_END(create_kernel_mapping)
+SYM_FUNC_START_LOCAL(remap_kernel_text)
+ mov x28, lr
+
+ ldr_l x1, kimage_vaddr
+ mov x2, x1
+ ldr_l x3, .Linitdata_begin
+ adrp x4, _text
+ bic x4, x4, #SWAPPER_BLOCK_SIZE - 1
+ mov x5, SWAPPER_RX_MMUFLAGS
+ mov x6, #SWAPPER_BLOCK_SHIFT
+ bl remap_region
+
+#if SWAPPER_BLOCK_SHIFT > PAGE_SHIFT
+ /*
+ * If the boundary between inittext and initdata happens to be sufficiently
+ * aligned, we are done here. Otherwise, we have to replace the block entry
+ * covering it with a table entry, and populate the lower level table accordingly.
+ */
+ ldr_l x3, .Linitdata_begin
+ tst x3, #SWAPPER_BLOCK_SIZE - 1
+ b.eq 0f
+
+ /* First, create a table mapping to replace the block mapping */
+ ldr_l x1, kimage_vaddr
+ bic x2, x3, #SWAPPER_BLOCK_SIZE - 1
+ adrp x4, init_pg_end - PAGE_SIZE
+ mov x5, #PMD_TYPE_TABLE
+ mov x6, #SWAPPER_BLOCK_SHIFT
+ bl remap_region
+
+ /* Apply executable permissions to the first subregion */
+ adrp x0, init_pg_end - PAGE_SIZE
+ ldr_l x3, .Linitdata_begin
+ bic x1, x3, #SWAPPER_BLOCK_SIZE - 1
+ mov x2, x1
+ adrp x4, __initdata_begin
+ bic x4, x4, #SWAPPER_BLOCK_SIZE - 1
+ mov x5, SWAPPER_RX_MMUFLAGS | PTE_TYPE_PAGE
+ mov x6, #PAGE_SHIFT
+ bl remap_region
+
+ /* Apply writable permissions to the second subregion */
+ ldr_l x2, .Linitdata_begin
+ bic x1, x2, #SWAPPER_BLOCK_SIZE - 1
+ add x3, x1, #SWAPPER_BLOCK_SIZE
+ adrp x4, __initdata_begin
+ mov x5, SWAPPER_RW_MMUFLAGS | PTE_TYPE_PAGE
+ mov x6, #PAGE_SHIFT
+ bl remap_region
+#endif
+0: dsb ishst
+ ret x28
+SYM_FUNC_END(remap_kernel_text)
+
+ __INITDATA
+ .align 3
+.Linitdata_begin:
+ .quad __initdata_begin
+ .previous
+
/*
* Initialize CPU registers with task-specific and cpu-specific context.
*
@@ -808,12 +868,17 @@ SYM_FUNC_START_LOCAL(__primary_switch)
#endif
bl clear_page_tables
bl create_kernel_mapping
-
+#ifdef CONFIG_RELOCATABLE
adrp x1, init_pg_dir
load_ttbr1 x1, x1, x2
-#ifdef CONFIG_RELOCATABLE
- bl __relocate_kernel
+ bl __relocate_kernel // preserves x0
+
+ __idmap_cpu_set_reserved_ttbr1 x1, x2
#endif
+ bl remap_kernel_text
+ adrp x1, init_pg_dir
+ load_ttbr1 x1, x1, x2
+
ldr x8, =__primary_switched
adrp x0, __PHYS_OFFSET
br x8
@@ -310,7 +310,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
init_pg_dir = .;
- . += INIT_DIR_SIZE;
+ . += INIT_DIR_SIZE + PAGE_SIZE;
init_pg_end = .;
. = ALIGN(SEGMENT_ALIGN);
@@ -168,17 +168,6 @@ SYM_FUNC_END(cpu_do_resume)
.pushsection ".idmap.text", "awx"
-.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
- adrp \tmp1, reserved_pg_dir
- phys_to_ttbr \tmp2, \tmp1
- offset_ttbr1 \tmp2, \tmp1
- msr ttbr1_el1, \tmp2
- isb
- tlbi vmalle1
- dsb nsh
- isb
-.endm
-
/*
* void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
*
In order to be able to run with WXN from boot (potentially because we
are booting under a hypervisor regime that mandates this), update the
temporary kernel page tables with read-only attributes for the text
regions before attempting to execute from them.

This is rather straightforward for 16k and 64k granule configurations,
as the split between executable and writable regions is guaranteed to
be aligned to the granule used for the early kernel page tables. For
4k, it involves installing a single table entry and populating it
accordingly.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/assembler.h |  8 +++
 arch/arm64/kernel/head.S           | 73 ++++++++++++++++++--
 arch/arm64/kernel/vmlinux.lds.S    |  2 +-
 arch/arm64/mm/proc.S               | 11 ---
 4 files changed, 78 insertions(+), 16 deletions(-)
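
To illustrate the 4k granule case described above, here is a standalone
C sketch (not part of the patch; the block size and the two addresses
are made-up example values, not the real kernel layout) of how the
block containing the inittext/initdata boundary ends up split into
read-only and writable page mappings:

/*
 * Standalone illustration of the permission split performed by
 * remap_kernel_text() for a 4k granule with 2 MiB swapper blocks.
 * All values below are hypothetical examples.
 */
#include <stdio.h>

#define EX_SWAPPER_BLOCK_SIZE	(2UL << 20)	/* 2 MiB PMD block, 4k granule */

int main(void)
{
	unsigned long kimage_vaddr   = 0xffff800008000000UL;	/* example only */
	unsigned long initdata_begin = 0xffff800009b42000UL;	/* example only */

	/* block that contains the RX/RW boundary */
	unsigned long boundary_block = initdata_begin & ~(EX_SWAPPER_BLOCK_SIZE - 1);

	/* text/inittext below the boundary block stays mapped with RX blocks */
	printf("RX block mappings: [%#lx .. %#lx)\n", kimage_vaddr, boundary_block);

	if (initdata_begin & (EX_SWAPPER_BLOCK_SIZE - 1)) {
		/*
		 * The boundary is not block aligned: the block entry covering
		 * it is replaced by a table entry (backed by the extra page
		 * reserved after init_pg_dir), and that table maps the tail of
		 * the text as RX pages and the start of initdata as RW pages.
		 */
		printf("RX page mappings : [%#lx .. %#lx)\n",
		       boundary_block, initdata_begin);
		printf("RW page mappings : [%#lx .. %#lx)\n",
		       initdata_begin, boundary_block + EX_SWAPPER_BLOCK_SIZE);
	}
	return 0;
}

With these example values it prints one RX block range up to the block
containing the boundary, followed by RX and RW page ranges covering the
two halves of that block, mirroring the three remap_region calls made
by remap_kernel_text().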