
[v1,2/2] x86/power/64: Fix __PAGE_OFFSET usage on restore

Message ID 1470071280-78706-3-git-send-email-thgarnie@google.com (mailing list archive)
State New, archived

Commit Message

Thomas Garnier Aug. 1, 2016, 5:08 p.m. UTC
When KASLR memory randomization is used, __PAGE_OFFSET is a global
variable whose value is set during boot. The assembly code was using the
variable as an immediate value to calculate the cr3 physical address, so
the resulting physical address was incorrect and triggered a GP fault.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
 arch/x86/power/hibernate_asm_64.S | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
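
For context, __PAGE_OFFSET is roughly defined like this when memory
randomization is part of the build (a sketch paraphrased from
arch/x86/include/asm/page_64_types.h around this kernel version; the
exact names __PAGE_OFFSET_BASE and page_offset_base are taken from the
KASLR memory randomization series and may differ):

	/* sketch, not verbatim */
	#ifdef CONFIG_RANDOMIZE_MEMORY
	#define __PAGE_OFFSET	page_offset_base	/* variable, set at boot */
	#else
	#define __PAGE_OFFSET	__PAGE_OFFSET_BASE	/* build-time constant */
	#endif

With randomization enabled, __PAGE_OFFSET therefore names a variable, so
the immediate form $__PAGE_OFFSET resolves to the address of
page_offset_base rather than to the randomized offset value.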

Patch

diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 8eee0e9..8db4905 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -23,6 +23,16 @@ 
 #include <asm/processor-flags.h>
 #include <asm/frame.h>
 
+/*
+ * A global variable holds the page_offset when KASLR memory randomization
+ * is enabled.
+ */
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define __PAGE_OFFSET_REF __PAGE_OFFSET
+#else
+#define __PAGE_OFFSET_REF $__PAGE_OFFSET
+#endif
+
 ENTRY(swsusp_arch_suspend)
 	movq	$saved_context, %rax
 	movq	%rsp, pt_regs_sp(%rax)
@@ -72,7 +82,7 @@  ENTRY(restore_image)
 	/* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
 	/* switch to temporary page tables */
-	movq	$__PAGE_OFFSET, %rcx
+	movq	__PAGE_OFFSET_REF, %rcx
 	subq	%rcx, %rax
 	movq	%rax, %cr3
 	/* flush TLB */
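
With the macro above, the patched instruction roughly expands as follows
in the two configurations (a sketch; the actual operand encoding is up
to the assembler):

	/* CONFIG_RANDOMIZE_MEMORY=y: load the runtime value from memory */
	movq	page_offset_base, %rcx

	/* CONFIG_RANDOMIZE_MEMORY=n: load the build-time constant */
	movq	$__PAGE_OFFSET, %rcx

The broken form, movq $page_offset_base, %rcx, would instead have loaded
the address of the variable, so the following subq produced a bogus
physical address for cr3.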