[v4,23/26] arm64: head: remap the kernel text/inittext region read-only

Message ID 20220613144550.3760857-24-ardb@kernel.org (mailing list archive)
State New, archived
Series arm64: refactor boot flow and add support for WXN

Commit Message

Ard Biesheuvel June 13, 2022, 2:45 p.m. UTC
In order to be able to run with WXN from boot (which could potentially
be under a hypervisor regime that mandates this), update the temporary
kernel page tables with read-only attributes for the text regions before
attempting to execute from them.

This is rather straightforward for 16k and 64k granule configurations, as
the split between executable and writable regions is guaranteed to be
aligned to the granule used for the early kernel page tables. For 4k, it
involves installing a single table entry and populating it accordingly.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/assembler.h |  8 +++
 arch/arm64/kernel/head.S           | 73 ++++++++++++++++++--
 arch/arm64/kernel/vmlinux.lds.S    |  2 +-
 arch/arm64/mm/proc.S               | 11 ---
 4 files changed, 78 insertions(+), 16 deletions(-)
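
For readers less familiar with the early mapping code, here is a minimal C-style sketch of what the 4k-granule corner case described above amounts to: when the inittext/initdata boundary is not aligned to SWAPPER_BLOCK_SIZE, the block entry covering that boundary is replaced with a table entry, and the lower-level table it points to maps the pages below the boundary read-only (executable) and the pages from the boundary onward writable. This is not the patch's assembly and not a kernel API; every name, constant and descriptor encoding below is an illustrative assumption.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative values: 4k pages, 2 MiB early block mappings. */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define SWAPPER_BLOCK_SHIFT	21
#define SWAPPER_BLOCK_SIZE	(1UL << SWAPPER_BLOCK_SHIFT)

typedef uint64_t pte_t;

/* Placeholder descriptor encodings; the real attribute bits differ. */
static pte_t make_page_entry(uint64_t pa, bool writable)
{
	return pa | (writable ? 0x2UL : 0x1UL);
}

static pte_t make_table_entry(uint64_t table_pa)
{
	return table_pa | 0x3UL;
}

/*
 * Split the block that spans the inittext/initdata boundary: pages below
 * initdata_begin_pa keep read-only (executable) attributes, pages from the
 * boundary to the end of the block become writable.
 */
static void split_boundary_block(pte_t *block_slot, uint64_t block_pa,
				 pte_t *new_table, uint64_t new_table_pa,
				 uint64_t initdata_begin_pa)
{
	for (size_t i = 0; i < SWAPPER_BLOCK_SIZE / PAGE_SIZE; i++) {
		uint64_t pa = block_pa + i * PAGE_SIZE;

		new_table[i] = make_page_entry(pa, pa >= initdata_begin_pa);
	}
	/* Finally point the former block slot at the new table. */
	*block_slot = make_table_entry(new_table_pa);
}

In the patch itself, the spare page used for the new lower-level table comes from the end of the init_pg_dir area, which is why the vmlinux.lds.S hunk grows that region by PAGE_SIZE.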

Comments

Kees Cook June 13, 2022, 4:57 p.m. UTC | #1
On Mon, Jun 13, 2022 at 04:45:47PM +0200, Ard Biesheuvel wrote:
> In order to be able to run with WXN from boot (which could potentially
> be under a hypervisor regime that mandates this), update the temporary
> kernel page tables with read-only attributes for the text regions before
> attempting to execute from them.
> 
> This is rather straightforward for 16k and 64k granule configurations, as
> the split between executable and writable regions is guaranteed to be
> aligned to the granule used for the early kernel page tables. For 4k, it
> involves installing a single table entry and populating it accordingly.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Reviewed-by: Kees Cook <keescook@chromium.org>

Patch

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index b2584709c332..e1e652410d7d 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -507,6 +507,14 @@  alternative_endif
 	load_ttbr1 \page_table, \tmp, \tmp2
 	.endm
 
+	.macro		__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
+	adrp		\tmp1, reserved_pg_dir
+	load_ttbr1 	\tmp1, \tmp1, \tmp2
+	tlbi		vmalle1
+	dsb		nsh
+	isb
+	.endm
+
 /*
  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
  */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6bf685f988f1..92cbad41eed8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -87,7 +87,7 @@ 
 	 *  x22        create_idmap() .. start_kernel()         ID map VA of the DT blob
 	 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
 	 *  x24        __primary_switch()                       linear map KASLR seed
-	 *  x28        create_idmap()                           callee preserved temp register
+	 *  x28        create_idmap(), remap_kernel_text()      callee preserved temp register
 	 */
 SYM_CODE_START(primary_entry)
 	bl	preserve_boot_args
@@ -380,6 +380,66 @@  SYM_FUNC_START_LOCAL(create_kernel_mapping)
 	ret
 SYM_FUNC_END(create_kernel_mapping)
 
+SYM_FUNC_START_LOCAL(remap_kernel_text)
+	mov	x28, lr
+
+	ldr_l	x1, kimage_vaddr
+	mov	x2, x1
+	ldr_l	x3, .Linitdata_begin
+	adrp	x4, _text
+	bic	x4, x4, #SWAPPER_BLOCK_SIZE - 1
+	mov	x5, SWAPPER_RX_MMUFLAGS
+	mov	x6, #SWAPPER_BLOCK_SHIFT
+	bl	remap_region
+
+#if SWAPPER_BLOCK_SHIFT > PAGE_SHIFT
+	/*
+	 * If the boundary between inittext and initdata happens to be aligned
+	 * sufficiently, we are done here. Otherwise, we have to replace its block
+	 * entry with a table entry, and populate the lower level table accordingly.
+	 */
+	ldr_l	x3, .Linitdata_begin
+	tst	x3, #SWAPPER_BLOCK_SIZE - 1
+	b.eq	0f
+
+	/* First, create a table mapping to replace the block mapping */
+	ldr_l	x1, kimage_vaddr
+	bic	x2, x3, #SWAPPER_BLOCK_SIZE - 1
+	adrp	x4, init_pg_end - PAGE_SIZE
+	mov	x5, #PMD_TYPE_TABLE
+	mov	x6, #SWAPPER_BLOCK_SHIFT
+	bl	remap_region
+
+	/* Apply executable permissions to the first subregion */
+	adrp	x0, init_pg_end - PAGE_SIZE
+	ldr_l	x3, .Linitdata_begin
+	bic	x1, x3, #SWAPPER_BLOCK_SIZE - 1
+	mov	x2, x1
+	adrp	x4, __initdata_begin
+	bic	x4, x4, #SWAPPER_BLOCK_SIZE - 1
+	mov	x5, SWAPPER_RX_MMUFLAGS | PTE_TYPE_PAGE
+	mov	x6, #PAGE_SHIFT
+	bl	remap_region
+
+	/* Apply writable permissions to the second subregion */
+	ldr_l	x2, .Linitdata_begin
+	bic	x1, x2, #SWAPPER_BLOCK_SIZE - 1
+	add	x3, x1, #SWAPPER_BLOCK_SIZE
+	adrp	x4, __initdata_begin
+	mov	x5, SWAPPER_RW_MMUFLAGS | PTE_TYPE_PAGE
+	mov	x6, #PAGE_SHIFT
+	bl	remap_region
+#endif
+0:	dsb	ishst
+	ret	x28
+SYM_FUNC_END(remap_kernel_text)
+
+	__INITDATA
+	.align	3
+.Linitdata_begin:
+	.quad	__initdata_begin
+	.previous
+
 	/*
 	 * Initialize CPU registers with task-specific and cpu-specific context.
 	 *
@@ -808,12 +868,17 @@  SYM_FUNC_START_LOCAL(__primary_switch)
 #endif
 	bl	clear_page_tables
 	bl	create_kernel_mapping
-
+#ifdef CONFIG_RELOCATABLE
 	adrp	x1, init_pg_dir
 	load_ttbr1 x1, x1, x2
-#ifdef CONFIG_RELOCATABLE
-	bl	__relocate_kernel
+	bl	__relocate_kernel		// preserves x0
+
+	__idmap_cpu_set_reserved_ttbr1 x1, x2
 #endif
+	bl	remap_kernel_text
+	adrp	x1, init_pg_dir
+	load_ttbr1 x1, x1, x2
+
 	ldr	x8, =__primary_switched
 	adrp	x0, __PHYS_OFFSET
 	br	x8
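
Zooming out from the __primary_switch hunk above: under CONFIG_RELOCATABLE, init_pg_dir is installed just long enough for __relocate_kernel to run, TTBR1 is then pointed at reserved_pg_dir (via the macro this patch moves into assembler.h) while remap_kernel_text rewrites the tables, and only afterwards is init_pg_dir installed with the text regions read-only; the __idmap_ prefix of that macro suggests this all still executes via the ID map. The rough C-style outline below is only meant to make that ordering easy to follow; all helpers are invented no-op stand-ins, not kernel functions.

#include <stdbool.h>

/* Invented no-op stand-ins for the assembly routines; the ordering is the point. */
static void clear_page_tables(void) {}
static void create_kernel_mapping(void) {}
static void relocate_kernel(void) {}
static void remap_kernel_text(void) {}		/* applies SWAPPER_RX_MMUFLAGS */
static void load_ttbr1_init_pg_dir(void) {}
static void load_ttbr1_reserved_pg_dir(void) {}	/* includes the TLBI, as in the new macro */

static void primary_switch_outline(bool relocatable)
{
	clear_page_tables();
	create_kernel_mapping();

	if (relocatable) {
		load_ttbr1_init_pg_dir();	/* __relocate_kernel needs the kernel map */
		relocate_kernel();
		load_ttbr1_reserved_pg_dir();	/* detach init_pg_dir before editing it */
	}

	remap_kernel_text();			/* text/inittext become read-only */
	load_ttbr1_init_pg_dir();		/* install the updated tables */
	/* ...and finally branch to __primary_switched at its kernel VA. */
}

This matches the commit message: the read-only attributes are in place before the kernel ever executes its text through the TTBR1 mapping.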
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 736aca63dad1..3830c6c66e46 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -310,7 +310,7 @@  SECTIONS
 
 	. = ALIGN(PAGE_SIZE);
 	init_pg_dir = .;
-	. += INIT_DIR_SIZE;
+	. += INIT_DIR_SIZE + PAGE_SIZE;
 	init_pg_end = .;
 
 	. = ALIGN(SEGMENT_ALIGN);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 493b8ffc9be5..c237e976b138 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -168,17 +168,6 @@  SYM_FUNC_END(cpu_do_resume)
 
 	.pushsection ".idmap.text", "awx"
 
-.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
-	adrp	\tmp1, reserved_pg_dir
-	phys_to_ttbr \tmp2, \tmp1
-	offset_ttbr1 \tmp2, \tmp1
-	msr	ttbr1_el1, \tmp2
-	isb
-	tlbi	vmalle1
-	dsb	nsh
-	isb
-.endm
-
 /*
  * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
  *