diff mbox series

[2/4] arm64: omit [_text, _stext) from permanent kernel mapping

Message ID 20201027073209.2897-3-ardb@kernel.org (mailing list archive)
State New, archived
Headers show
Series arm64: head: pad Image header to 64 KB and unmap it | expand

Commit Message

Ard Biesheuvel Oct. 27, 2020, 7:32 a.m. UTC
In a previous patch, we increased the size of the EFI PE/COFF header
to 64 KB, which caused the _stext symbol to appear at a fixed
offset of 64 KB into the image.

Since 64 KB is also the largest page size we support, this completely
removes the need to map the first 64 KB of the kernel image, given that
it only contains the arm64 Image header and the EFI header, none of which
we ever access again after booting the kernel. More importantly, we should
avoid an executable mapping of non-executable and not entirely predictable
data, in the unlikely event that we emitted something that looks like an
opcode that could be used as a gadget for speculative execution.

So let's limit the kernel mapping of .text to the [_stext, _etext) region,
which matches the view of generic code (such as kallsyms) when it reasons
about the boundaries of the kernel's .text section.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/efi-header.S  |  7 -------
 arch/arm64/kernel/setup.c       |  4 ++--
 arch/arm64/kernel/vmlinux.lds.S |  2 +-
 arch/arm64/mm/init.c            |  2 +-
 arch/arm64/mm/mmu.c             | 10 +++++-----
 5 files changed, 9 insertions(+), 16 deletions(-)

Comments

Will Deacon Oct. 28, 2020, 2:10 p.m. UTC | #1
On Tue, Oct 27, 2020 at 08:32:07AM +0100, Ard Biesheuvel wrote:
> In a previous patch, we increased the size of the EFI PE/COFF header
> to 64 KB, which caused the _stext symbol to appear at a fixed
> offset of 64 KB into the image.
> 
> Since 64 KB is also the largest page size we support, this completely
> removes the need to map the first 64 KB of the kernel image, given that
> it only contains the arm64 Image header and the EFI header, none of which
> we ever access again after booting the kernel. More importantly, we should
> avoid an executable mapping of non-executable and not entirely predictable
> data, in the unlikely event that we emitted something that looks like an
> opcode that could be used as a gadget for speculative execution.
> 
> So let's limit the kernel mapping of .text to the [_stext, _etext) region,
> which matches the view of generic code (such as kallsyms) when it reasons
> about the boundaries of the kernel's .text section.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  arch/arm64/kernel/efi-header.S  |  7 -------
>  arch/arm64/kernel/setup.c       |  4 ++--
>  arch/arm64/kernel/vmlinux.lds.S |  2 +-
>  arch/arm64/mm/init.c            |  2 +-
>  arch/arm64/mm/mmu.c             | 10 +++++-----
>  5 files changed, 9 insertions(+), 16 deletions(-)

Acked-by: Will Deacon <will@kernel.org>

Will
diff mbox series

Patch

diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
index a71844fb923e..3ad4aecff033 100644
--- a/arch/arm64/kernel/efi-header.S
+++ b/arch/arm64/kernel/efi-header.S
@@ -140,13 +140,6 @@  efi_debug_entry:
 	.set	efi_debug_entry_size, . - efi_debug_entry
 #endif
 
-	/*
-	 * EFI will load .text onwards at the 4k section alignment
-	 * described in the PE/COFF header. To ensure that instruction
-	 * sequences using an adrp and a :lo12: immediate will function
-	 * correctly at this alignment, we must ensure that .text is
-	 * placed at a 4k boundary in the Image to begin with.
-	 */
 	.balign	SEGMENT_ALIGN
 efi_header_end:
 	.endm
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 133257ffd859..fe1cf52f5f80 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -206,7 +206,7 @@  static void __init request_standard_resources(void)
 	unsigned long i = 0;
 	size_t res_size;
 
-	kernel_code.start   = __pa_symbol(_text);
+	kernel_code.start   = __pa_symbol(_stext);
 	kernel_code.end     = __pa_symbol(__init_begin - 1);
 	kernel_data.start   = __pa_symbol(_sdata);
 	kernel_data.end     = __pa_symbol(_end - 1);
@@ -283,7 +283,7 @@  u64 cpu_logical_map(int cpu)
 
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
-	init_mm.start_code = (unsigned long) _text;
+	init_mm.start_code = (unsigned long) _stext;
 	init_mm.end_code   = (unsigned long) _etext;
 	init_mm.end_data   = (unsigned long) _edata;
 	init_mm.brk	   = (unsigned long) _end;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6d78c041fdf6..6567d80dd15f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -121,7 +121,7 @@  SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
-	.text : {			/* Real text segment		*/
+	.text : ALIGN(SEGMENT_ALIGN) {	/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			IRQENTRY_TEXT
 			SOFTIRQENTRY_TEXT
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 095540667f0f..aa438b9d7f40 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -367,7 +367,7 @@  void __init arm64_memblock_init(void)
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
 	 */
-	memblock_reserve(__pa_symbol(_text), _end - _text);
+	memblock_reserve(__pa_symbol(_stext), _end - _stext);
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
 		/* the generic initrd code expects virtual addresses */
 		initrd_start = __phys_to_virt(phys_initrd_start);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1c0f3e02f731..e6f2accaeade 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -464,14 +464,14 @@  void __init mark_linear_text_alias_ro(void)
 	/*
 	 * Remove the write permissions from the linear alias of .text/.rodata
 	 */
-	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
-			    (unsigned long)__init_begin - (unsigned long)_text,
+	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
+			    (unsigned long)__init_begin - (unsigned long)_stext,
 			    PAGE_KERNEL_RO);
 }
 
 static void __init map_mem(pgd_t *pgdp)
 {
-	phys_addr_t kernel_start = __pa_symbol(_text);
+	phys_addr_t kernel_start = __pa_symbol(_stext);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 	phys_addr_t start, end;
 	int flags = 0;
@@ -506,7 +506,7 @@  static void __init map_mem(pgd_t *pgdp)
 	}
 
 	/*
-	 * Map the linear alias of the [_text, __init_begin) interval
+	 * Map the linear alias of the [_stext, __init_begin) interval
 	 * as non-executable now, and remove the write permission in
 	 * mark_linear_text_alias_ro() below (which will be called after
 	 * alternative patching has completed). This makes the contents
@@ -665,7 +665,7 @@  static void __init map_kernel(pgd_t *pgdp)
 	 * Only rodata will be remapped with different permissions later on,
 	 * all other segments are allowed to use contiguous mappings.
 	 */
-	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
+	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
 			   VM_NO_GUARD);
 	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
 			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);