diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -191,7 +191,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
* spill RDI and restore it in a second-stage trampoline.
*/
pushq %rdi
- movq $entry_SYSCALL_64_stage2, %rdi
+ movabsq $entry_SYSCALL_64_stage2, %rdi
JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)
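The trampoline must load the stage-2 entry's kernel virtual address, which under PIE may sit outside the +/-2GB window that a sign-extended 32-bit immediate can reach. A minimal standalone sketch of the two encodings (my_target is a hypothetical label; the retpoline JMP_NOSPEC macro is replaced by a plain indirect jmp here):

        movq    $my_target, %rdi        /* imm32, sign-extended: R_X86_64_32S,
                                         * only reaches the +/-2GB window */
        movabsq $my_target, %rdi        /* imm64: R_X86_64_64, valid anywhere */
        jmp     *%rdi

The imm64 relocation can be fixed up for any load address, which is why movabsq is the PIE-safe form.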
@@ -1276,7 +1276,8 @@ ENTRY(error_entry)
movl %ecx, %eax /* zero extend */
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
- cmpq $.Lgs_change, RIP+8(%rsp)
+ leaq .Lgs_change(%rip), %rcx
+ cmpq %rcx, RIP+8(%rsp)
jne .Lerror_entry_done
/*
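cmpq with a symbol immediate encodes it as a 32-bit value, and there is no cmp form that takes a RIP-relative address as an operand value, so the address has to be materialized in a scratch register first. A sketch with a hypothetical label (the patch reuses %rcx, which appears to be dead at this point since its value was already copied to %rax above):

        leaq    some_label(%rip), %rcx  /* address computed relative to RIP */
        cmpq    %rcx, RIP+8(%rsp)       /* compare against the saved RIP */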
@@ -1481,10 +1482,10 @@ ENTRY(nmi)
* resume the outer NMI.
*/
- movq $repeat_nmi, %rdx
+ leaq repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja 1f
- movq $end_repeat_nmi, %rdx
+ leaq end_repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja nested_nmi_out
1:
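Both forms are 7 bytes (REX.W prefix, opcode, ModRM, one 32-bit field), so swapping movq $sym, %rdx for leaq sym(%rip), %rdx is size-neutral; only the relocation changes, from the absolute R_X86_64_32S to the PC-relative R_X86_64_PC32. The range check itself is unchanged, sketched here with hypothetical labels:

        leaq    range_start(%rip), %rdx
        cmpq    8(%rsp), %rdx
        ja      1f                      /* saved RIP below range_start */
        leaq    range_end(%rip), %rdx
        cmpq    8(%rsp), %rdx
        ja      .Lin_range              /* saved RIP inside the range */
1: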
@@ -1538,7 +1539,8 @@ nested_nmi:
pushq %rdx
pushfq
pushq $__KERNEL_CS
- pushq $repeat_nmi
+ leaq repeat_nmi(%rip), %rdx
+ pushq %rdx
/* Put stack back */
addq $(6*8), %rsp
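pushq likewise accepts only a sign-extended 32-bit immediate, so a symbol address that may lie outside that window needs a register detour. A sketch with a hypothetical label (%rdx is safe to clobber here because its value was already pushed into the frame being built):

        leaq    some_label(%rip), %rdx  /* compute the address PC-relatively */
        pushq   %rdx                    /* push the full 64-bit address */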
@@ -1577,7 +1579,11 @@ first_nmi:
addq $8, (%rsp) /* Fix up RSP */
pushfq /* RFLAGS */
pushq $__KERNEL_CS /* CS */
- pushq $1f /* RIP */
+ pushq $0 /* Future return address */
+ pushq %rax /* Save RAX */
+ leaq 1f(%rip), %rax /* RIP */
+ movq %rax, 8(%rsp) /* Store 1f as the return address */
+ popq %rax /* Restore RAX */
iretq /* continues at repeat_nmi below */
UNWIND_HINT_IRET_REGS
1:
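On this path the patch preserves %rax around the computation, suggesting no general-purpose register is free, so the address cannot simply be left in a scratch register: a placeholder slot is pushed and then patched through a temporarily borrowed register. The generic pattern, with a hypothetical target label:

        pushq   $0                      /* reserve the return-address slot */
        pushq   %rax                    /* borrow %rax as scratch */
        leaq    target(%rip), %rax      /* compute the address PC-relatively */
        movq    %rax, 8(%rsp)           /* patch the reserved slot */
        popq    %rax                    /* restore %rax */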
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -208,11 +208,9 @@ identity_mapped:
movq %rax, %cr3
lea PAGE_SIZE(%r8), %rsp
call swap_pages
- jmp *virtual_mapped_addr(%rip)
-
- /* Absolute value for PIE support */
-virtual_mapped_addr:
- .quad virtual_mapped
+ movabsq $virtual_mapped, %rax
+ pushq %rax
+ ret
virtual_mapped:
movq RSP(%r8), %rsp
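The previous code kept the absolute address of virtual_mapped in a local .quad and jumped through it; a 64-bit immediate makes that literal pool unnecessary. A standalone sketch of the replacement pattern, with a hypothetical target label:

        movabsq $target, %rax           /* imm64, R_X86_64_64 fixed up at boot */
        pushq   %rax
        ret                             /* "returns" into target */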
Change the assembly code to use only relative references of symbols for
the kernel to be PIE compatible.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
 arch/x86/entry/entry_64.S            | 18 ++++++++++++------
 arch/x86/kernel/relocate_kernel_64.S |  8 +++-----
 2 files changed, 15 insertions(+), 11 deletions(-)