From patchwork Thu Aug 10 17:26:00 2017
X-Patchwork-Submitter: Thomas Garnier
X-Patchwork-Id: 9894317
From: Thomas Garnier
To: Herbert Xu, "David S. Miller", Thomas Gleixner, Ingo Molnar,
	"H. Peter Anvin", Peter Zijlstra, Josh Poimboeuf, Arnd Bergmann,
	Thomas Garnier, Matthias Kaehlcke, Boris Ostrovsky, Juergen Gross,
	Paolo Bonzini, Radim Krčmář, Joerg Roedel, Tom Lendacky,
	Andy Lutomirski, Borislav Petkov, Brian Gerst, "Kirill A. Shutemov",
	"Rafael J. Wysocki", Len Brown, Pavel Machek, Tejun Heo,
	Christoph Lameter, Paul Gortmaker, Chris Metcalf, Andrew Morton,
	"Paul E. McKenney", Nicolas Pitre, Christopher Li, "Rafael J. Wysocki",
	Lukas Wunner, Mika Westerberg, Dou Liyang, Daniel Borkmann,
	Alexei Starovoitov, Masahiro Yamada, Markus Trippelsdorf,
	Steven Rostedt, Kees Cook, Rik van Riel, David Howells, Waiman Long,
	Kyle Huey, Peter Foley, Tim Chen, Catalin Marinas, Ard Biesheuvel,
	Michal Hocko, Matthew Wilcox, "H. J. Lu", Paul Bolle, Rob Landley,
	Baoquan He, Daniel Micay
Cc: x86@kernel.org, linux-crypto@vger.kernel.org, linux-kernel@vger.kernel.org,
	xen-devel@lists.xenproject.org, kvm@vger.kernel.org,
	linux-pm@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-sparse@vger.kernel.org, kernel-hardening@lists.openwall.com
Date: Thu, 10 Aug 2017 10:26:00 -0700
Message-Id: <20170810172615.51965-9-thgarnie@google.com>
X-Mailer: git-send-email 2.14.0.434.g98096fd7a8-goog
In-Reply-To: <20170810172615.51965-1-thgarnie@google.com>
References: <20170810172615.51965-1-thgarnie@google.com>
Subject: [kernel-hardening] [RFC v2 08/23] x86/entry/64: Adapt assembly for PIE support

Change the assembly code to use only relative references to symbols so
that the kernel can be PIE compatible. Position Independent Executable
(PIE) support will allow extending the KASLR randomization range below
the -2G memory limit.

Signed-off-by: Thomas Garnier
---
 arch/x86/entry/entry_64.S | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index daf8936d0628..a3967a2af6ec 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -199,12 +199,15 @@ entry_SYSCALL_64_fastpath:
 	ja	1f			/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
 
+	/* Ensures the call is position independent */
+	leaq	sys_call_table(%rip), %r11
+
 	/*
 	 * This call instruction is handled specially in stub_ptregs_64.
 	 * It might end up jumping to the slow path.  If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
-	call	*sys_call_table(, %rax, 8)
+	call	*(%r11, %rax, 8)
 .Lentry_SYSCALL_64_after_fastpath_call:
 	movq	%rax, RAX(%rsp)
@@ -339,7 +342,8 @@ ENTRY(stub_ptregs_64)
 	 * RAX stores a pointer to the C function implementing the syscall.
 	 * IRQs are on.
 	 */
-	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
+	cmpq	%r11, (%rsp)
 	jne	1f
 
 	/*
@@ -1210,7 +1214,8 @@ ENTRY(error_entry)
 	movl	%ecx, %eax		/* zero extend */
 	cmpq	%rax, RIP+8(%rsp)
 	je	.Lbstep_iret
-	cmpq	$.Lgs_change, RIP+8(%rsp)
+	leaq	.Lgs_change(%rip), %rcx
+	cmpq	%rcx, RIP+8(%rsp)
 	jne	.Lerror_entry_done
 
 	/*
@@ -1430,10 +1435,10 @@ ENTRY(nmi)
 	 * resume the outer NMI.
 	 */
 
-	movq	$repeat_nmi, %rdx
+	leaq	repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	1f
-	movq	$end_repeat_nmi, %rdx
+	leaq	end_repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	nested_nmi_out
 1:
@@ -1487,7 +1492,8 @@ nested_nmi:
 	pushq	%rdx
 	pushfq
 	pushq	$__KERNEL_CS
-	pushq	$repeat_nmi
+	leaq	repeat_nmi(%rip), %rdx
+	pushq	%rdx
 
 	/* Put stack back */
 	addq	$(6*8), %rsp
@@ -1526,7 +1532,9 @@ first_nmi:
 	addq	$8, (%rsp)	/* Fix up RSP */
 	pushfq			/* RFLAGS */
 	pushq	$__KERNEL_CS	/* CS */
-	pushq	$1f		/* RIP */
+	pushq	%rax		/* Support Position Independent Code */
+	leaq	1f(%rip), %rax	/* RIP */
+	xchgq	%rax, (%rsp)	/* Restore RAX, put 1f */
 	INTERRUPT_RETURN	/* continues at repeat_nmi below */
 	UNWIND_HINT_IRET_REGS
 1:
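
Background note on the pattern used throughout the hunks above (not part
of the patch): an absolute reference such as "movq $sym, %reg" or
"call *sys_call_table(, %rax, 8)" makes the assembler emit an absolute
relocation against the symbol, which conflicts with a position
independent kernel, while "leaq sym(%rip), %reg" only needs a
PC-relative relocation. The sketch below is a standalone illustration
of that difference, not kernel code; the symbol my_table and the two
function names are made up for the example. It should assemble with
"gcc -c example.S", and "objdump -dr example.o" can be used to compare
the relocations each form produces.

	.data
my_table:
	.quad	0, 1, 2, 3

	.text
	.globl	absolute_ref
absolute_ref:
	movq	$my_table, %rdx		/* absolute address of the table */
	movq	(%rdx, %rax, 8), %rax	/* index it, like the syscall table */
	ret

	.globl	relative_ref
relative_ref:
	leaq	my_table(%rip), %rdx	/* RIP-relative, PIE compatible */
	movq	(%rdx, %rax, 8), %rax
	ret

With the default small code model the first form is expected to emit an
R_X86_64_32S relocation that the linker rejects when building a PIE
object, whereas the leaq form emits a PC-relative relocation; that is
the conversion applied to entry_64.S above.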