[v7,09/12] x86/boot/64: Adapt assembly for PIE support

Message ID 20190520231948.49693-10-thgarnie@chromium.org
State New
Series
  • x86: PIE support to extend KASLR randomization

Commit Message

Thomas Garnier May 20, 2019, 11:19 p.m. UTC
From: Thomas Garnier <thgarnie@google.com>

Change the assembly code to use only relative references to symbols so that
the kernel can be PIE compatible.

Early in boot, the kernel is mapped at a temporary address while the page
tables are being prepared. To compute the page-table changes needed for KASLR,
the boot code calculates the difference between the expected address of the
kernel and the one chosen by KASLR. This does not work with PIE because all
symbol references in the code are relative: instead of the future relocated
virtual address, they yield the current temporary mapping. The affected
instructions were therefore changed to use absolute 64-bit references.
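
For illustration only (not part of the patch), the three reference forms at
play, assuming a symbol named sym:

	movq	$sym, %rax		/* 32-bit sign-extended absolute (R_X86_64_32S);
					   only works while the kernel is linked in the
					   top 2GB of the address space */
	leaq	sym(%rip), %rax		/* RIP-relative; yields the address sym is
					   currently mapped at, i.e. the temporary
					   early-boot mapping */
	movabs	$sym, %rax		/* 64-bit absolute immediate (R_X86_64_64);
					   after relocation it yields the final
					   kernel virtual address */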

Position Independent Executable (PIE) support will allow extending the KASLR
randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
 arch/x86/kernel/head_64.S | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

Comments

Kees Cook June 10, 2019, 10:26 p.m. UTC | #1
On Mon, May 20, 2019 at 04:19:34PM -0700, Thomas Garnier wrote:
> From: Thomas Garnier <thgarnie@google.com>
> 
> Change the assembly code to use only relative references to symbols so that
> the kernel can be PIE compatible.
> 
> Early in boot, the kernel is mapped at a temporary address while the page
> tables are being prepared. To compute the page-table changes needed for KASLR,
> the boot code calculates the difference between the expected address of the
> kernel and the one chosen by KASLR. This does not work with PIE because all
> symbol references in the code are relative: instead of the future relocated
> virtual address, they yield the current temporary mapping. The affected
> instructions were therefore changed to use absolute 64-bit references.
> 
> Position Independent Executable (PIE) support will allow extending the KASLR
> randomization range below 0xffffffff80000000.
> 
> Signed-off-by: Thomas Garnier <thgarnie@google.com>

Reviewed-by: Kees Cook <keescook@chromium.org>

-Kees

> ---
>  arch/x86/kernel/head_64.S | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index bcd206c8ac90..64a4f0a22b20 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -90,8 +90,10 @@ startup_64:
>  	popq	%rsi
>  
>  	/* Form the CR3 value being sure to include the CR3 modifier */
> -	addq	$(early_top_pgt - __START_KERNEL_map), %rax
> +	movabs  $(early_top_pgt - __START_KERNEL_map), %rcx
> +	addq    %rcx, %rax
>  	jmp 1f
> +
>  ENTRY(secondary_startup_64)
>  	UNWIND_HINT_EMPTY
>  	/*
> @@ -120,7 +122,8 @@ ENTRY(secondary_startup_64)
>  	popq	%rsi
>  
>  	/* Form the CR3 value being sure to include the CR3 modifier */
> -	addq	$(init_top_pgt - __START_KERNEL_map), %rax
> +	movabs	$(init_top_pgt - __START_KERNEL_map), %rcx
> +	addq    %rcx, %rax
>  1:
>  
>  	/* Enable PAE mode, PGE and LA57 */
> @@ -138,7 +141,7 @@ ENTRY(secondary_startup_64)
>  	movq	%rax, %cr3
>  
>  	/* Ensure I am executing from virtual addresses */
> -	movq	$1f, %rax
> +	movabs  $1f, %rax
>  	ANNOTATE_RETPOLINE_SAFE
>  	jmp	*%rax
>  1:
> @@ -235,11 +238,12 @@ ENTRY(secondary_startup_64)
>  	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
>  	 *		address given in m16:64.
>  	 */
> -	pushq	$.Lafter_lret	# put return address on stack for unwinder
> +	movabs  $.Lafter_lret, %rax
> +	pushq	%rax		# put return address on stack for unwinder
>  	xorl	%ebp, %ebp	# clear frame pointer
> -	movq	initial_code(%rip), %rax
> +	leaq	initial_code(%rip), %rax
>  	pushq	$__KERNEL_CS	# set correct cs
> -	pushq	%rax		# target address in negative space
> +	pushq	(%rax)		# target address in negative space
>  	lretq
>  .Lafter_lret:
>  END(secondary_startup_64)
> -- 
> 2.21.0.1020.gf2820cf01a-goog
>

Patch

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index bcd206c8ac90..64a4f0a22b20 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -90,8 +90,10 @@  startup_64:
 	popq	%rsi
 
 	/* Form the CR3 value being sure to include the CR3 modifier */
-	addq	$(early_top_pgt - __START_KERNEL_map), %rax
+	movabs  $(early_top_pgt - __START_KERNEL_map), %rcx
+	addq    %rcx, %rax
 	jmp 1f
+
 ENTRY(secondary_startup_64)
 	UNWIND_HINT_EMPTY
 	/*
@@ -120,7 +122,8 @@  ENTRY(secondary_startup_64)
 	popq	%rsi
 
 	/* Form the CR3 value being sure to include the CR3 modifier */
-	addq	$(init_top_pgt - __START_KERNEL_map), %rax
+	movabs	$(init_top_pgt - __START_KERNEL_map), %rcx
+	addq    %rcx, %rax
 1:
 
 	/* Enable PAE mode, PGE and LA57 */
@@ -138,7 +141,7 @@  ENTRY(secondary_startup_64)
 	movq	%rax, %cr3
 
 	/* Ensure I am executing from virtual addresses */
-	movq	$1f, %rax
+	movabs  $1f, %rax
 	ANNOTATE_RETPOLINE_SAFE
 	jmp	*%rax
 1:
@@ -235,11 +238,12 @@  ENTRY(secondary_startup_64)
 	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
 	 *		address given in m16:64.
 	 */
-	pushq	$.Lafter_lret	# put return address on stack for unwinder
+	movabs  $.Lafter_lret, %rax
+	pushq	%rax		# put return address on stack for unwinder
 	xorl	%ebp, %ebp	# clear frame pointer
-	movq	initial_code(%rip), %rax
+	leaq	initial_code(%rip), %rax
 	pushq	$__KERNEL_CS	# set correct cs
-	pushq	%rax		# target address in negative space
+	pushq	(%rax)		# target address in negative space
 	lretq
 .Lafter_lret:
 END(secondary_startup_64)
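
As an aside, a rough sketch (not from the patch itself) of what the far-return
sequence in the last hunk leaves on the stack just before the lretq:

	#	 0(%rsp)  value loaded from initial_code  -> popped into %rip
	#	 8(%rsp)  __KERNEL_CS                     -> popped into %cs
	#	16(%rsp)  .Lafter_lret  (fake return address for the unwinder)
	#
	# lretq pops the new %rip and then %cs, so execution continues at the
	# C entry point stored in initial_code; .Lafter_lret is never actually
	# returned to, it only gives the unwinder a plausible return address.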