From patchwork Mon Apr 10 16:44:19 2017
X-Patchwork-Submitter: Thomas Garnier
X-Patchwork-Id: 9673339
From: Thomas Garnier
To: Martin Schwidefsky, Heiko Carstens, Arnd Bergmann, Dave Hansen,
	Andrew Morton, Thomas Garnier, David Howells, René Nyffenegger,
	"Paul E. McKenney", Ingo Molnar, Thomas Gleixner, Oleg Nesterov,
	Stephen Smalley, Pavel Tikhomirov, Ingo Molnar, "H. Peter Anvin",
	Andy Lutomirski, Paolo Bonzini, Kees Cook, Rik van Riel,
	Josh Poimboeuf, Borislav Petkov, Brian Gerst, "Kirill A. Shutemov",
Shutemov" , Christian Borntraeger , Russell King , Will Deacon , Catalin Marinas , Mark Rutland , James Morse Cc: linux-s390@vger.kernel.org, linux-kernel@vger.kernel.org, linux-api@vger.kernel.org, x86@kernel.org, linux-arm-kernel@lists.infradead.org, kernel-hardening@lists.openwall.com Date: Mon, 10 Apr 2017 09:44:19 -0700 Message-Id: <20170410164420.64003-3-thgarnie@google.com> X-Mailer: git-send-email 2.12.2.715.g7642488e1d-goog In-Reply-To: <20170410164420.64003-1-thgarnie@google.com> References: <20170410164420.64003-1-thgarnie@google.com> Subject: [kernel-hardening] [PATCH v7 3/4] arm/syscalls: Architecture specific pre-usermode check X-Virus-Scanned: ClamAV using ClamSMTP Disable the generic pre-usermode check in favor of an optimized implementation. This patch adds specific checks on user-mode return path to make it faster and smaller. The address limit is checked on each syscall return path to user-mode path as well as the irq user-mode return function. If the address limit was changed, a generic handler is called to stop the kernel on an explicit check. Signed-off-by: Thomas Garnier --- Based on next-20170410 --- arch/arm/Kconfig | 1 + arch/arm/kernel/entry-common.S | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b0e8c9763e94..af0992debfd2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -12,6 +12,7 @@ config ARM select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_NO_SYSCALL_VERIFY_PRE_USERMODE_STATE select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index eb5cd77bf1d8..c8a8ba5c22ad 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_AEABI #include #endif @@ -27,7 +28,6 @@ #include "entry-header.S" - .align 5 #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) /* @@ -40,9 +40,12 @@ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts + ldr r2, [tsk, #TI_ADDR_LIMIT] ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK bne fast_work_pending + cmp r2, #TASK_SIZE + blne address_limit_check_failed /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -66,6 +69,7 @@ ret_fast_syscall: UNWIND(.cantunwind ) str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 disable_irq_notrace @ disable interrupts + ldr r2, [tsk, #TI_ADDR_LIMIT] ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK beq no_work_pending @@ -82,6 +86,7 @@ slow_work_pending: mov r2, why @ 'syscall' bl do_work_pending cmp r0, #0 + ldreq r2, [tsk, #TI_ADDR_LIMIT] beq no_work_pending movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) ldmia sp, {r0 - r6} @ have to reload r0 - r6 @@ -99,9 +104,12 @@ ret_slow_syscall: disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) ldr r1, [tsk, #TI_FLAGS] + ldr r2, [tsk, #TI_ADDR_LIMIT] tst r1, #_TIF_WORK_MASK bne slow_work_pending no_work_pending: + cmp r2, #TASK_SIZE + blne address_limit_check_failed asm_trace_hardirqs_on save = 0 /* perform architecture specific actions before user return */