[v7,08/12] x86/acpi: Adapt assembly for PIE support

Message ID 20190520231948.49693-9-thgarnie@chromium.org (mailing list archive)
State New, archived
Series x86: PIE support to extend KASLR randomization

Commit Message

Thomas Garnier May 20, 2019, 11:19 p.m. UTC
From: Thomas Garnier <thgarnie@google.com>

Change the assembly code to use only relative references to symbols so
that the kernel can be PIE compatible.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
---
 arch/x86/kernel/acpi/wakeup_64.S | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)
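
The whole patch follows a single pattern: every absolute reference to a
symbol becomes a RIP-relative one. As a minimal sketch of the two forms,
using a hypothetical symbol some_var (not one from the kernel sources):

	/* Absolute: the symbol's link-time address is encoded in the
	 * instruction, so the code is only correct at the address the
	 * kernel was linked at (or after boot-time relocation).
	 */
	movq	some_var, %rax		/* load the value stored at some_var */
	movq	$some_var, %rax		/* load the address of some_var itself */

	/* RIP-relative: only the displacement from the next instruction
	 * is encoded, so the code works wherever it is loaded -- the
	 * property PIE needs.
	 */
	movq	some_var(%rip), %rax	/* load the value stored at some_var */
	leaq	some_var(%rip), %rax	/* compute the address at run time */

The movq $sym -> leaq sym(%rip) conversion is size-neutral (both encode
in 7 bytes), and the memory-operand forms shrink by a byte, since
RIP-relative addressing needs no SIB byte.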

Comments

Kees Cook June 10, 2019, 11:52 p.m. UTC | #1
On Mon, May 20, 2019 at 04:19:33PM -0700, Thomas Garnier wrote:
> From: Thomas Garnier <thgarnie@google.com>
> 
> Change the assembly code to use only relative references to symbols so
> that the kernel can be PIE compatible.
> 
> Position Independent Executable (PIE) support will allow extending the
> KASLR randomization range below 0xffffffff80000000.
> 
> Signed-off-by: Thomas Garnier <thgarnie@google.com>

Reviewed-by: Kees Cook <keescook@chromium.org>

-Kees


Patch

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 510fa12aab73..e080e943e295 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -14,7 +14,7 @@ 
 	 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 	 */
 ENTRY(wakeup_long64)
-	movq	saved_magic, %rax
+	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	jne	bogus_64_magic
@@ -25,14 +25,14 @@ ENTRY(wakeup_long64)
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_rsp, %rsp
+	movq	saved_rsp(%rip), %rsp
 
-	movq	saved_rbx, %rbx
-	movq	saved_rdi, %rdi
-	movq	saved_rsi, %rsi
-	movq	saved_rbp, %rbp
+	movq	saved_rbx(%rip), %rbx
+	movq	saved_rdi(%rip), %rdi
+	movq	saved_rsi(%rip), %rsi
+	movq	saved_rbp(%rip), %rbp
 
-	movq	saved_rip, %rax
+	movq	saved_rip(%rip), %rax
 	jmp	*%rax
 ENDPROC(wakeup_long64)
 
@@ -45,7 +45,7 @@ ENTRY(do_suspend_lowlevel)
 	xorl	%eax, %eax
 	call	save_processor_state
 
-	movq	$saved_context, %rax
+	leaq	saved_context(%rip), %rax
 	movq	%rsp, pt_regs_sp(%rax)
 	movq	%rbp, pt_regs_bp(%rax)
 	movq	%rsi, pt_regs_si(%rax)
@@ -64,13 +64,14 @@ ENTRY(do_suspend_lowlevel)
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	movq	$.Lresume_point, saved_rip(%rip)
+	leaq	.Lresume_point(%rip), %rax
+	movq	%rax, saved_rip(%rip)
 
-	movq	%rsp, saved_rsp
-	movq	%rbp, saved_rbp
-	movq	%rbx, saved_rbx
-	movq	%rdi, saved_rdi
-	movq	%rsi, saved_rsi
+	movq	%rsp, saved_rsp(%rip)
+	movq	%rbp, saved_rbp(%rip)
+	movq	%rbx, saved_rbx(%rip)
+	movq	%rdi, saved_rdi(%rip)
+	movq	%rsi, saved_rsi(%rip)
 
 	addq	$8, %rsp
 	movl	$3, %edi
@@ -82,7 +83,7 @@ ENTRY(do_suspend_lowlevel)
 	.align 4
 .Lresume_point:
 	/* We don't restore %rax, it must be 0 anyway */
-	movq	$saved_context, %rax
+	leaq	saved_context(%rip), %rax
 	movq	saved_context_cr4(%rax), %rbx
 	movq	%rbx, %cr4
 	movq	saved_context_cr3(%rax), %rbx
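
One hunk grows by an instruction and deserves a note. The old

	movq	$.Lresume_point, saved_rip(%rip)

stored the label's link-time address into saved_rip as a sign-extended
32-bit immediate. Under PIE that address is only known at run time, and
x86-64 has no encoding that stores a RIP-relative-computed address
directly to memory, so the replacement goes through a scratch register
(%rax, whose saved_context pointer is no longer needed at this point
and which is reloaded after resume anyway):

	leaq	.Lresume_point(%rip), %rax
	movq	%rax, saved_rip(%rip)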