From patchwork Thu Aug 2 13:21:32 2018
From: Ard Biesheuvel
To: kernel-hardening@lists.openwall.com
Cc: keescook@chromium.org, christoffer.dall@arm.com, will.deacon@arm.com,
    catalin.marinas@arm.com, mark.rutland@arm.com, labbott@fedoraproject.org,
    linux-arm-kernel@lists.infradead.org, Ard Biesheuvel
Subject: [RFC/PoC PATCH 3/3] arm64: enable ROP protection by clearing SP bit #55 across function returns
Date: Thu, 2 Aug 2018 15:21:32 +0200
Message-Id: <20180802132133.23999-4-ard.biesheuvel@linaro.org>
In-Reply-To: <20180802132133.23999-1-ard.biesheuvel@linaro.org>
References: <20180802132133.23999-1-ard.biesheuvel@linaro.org>

ROP attacks rely on a large supply of so-called 'gadgets', which are (in
this context) short
sequences of instructions ending in a stack pop and a return instruction.
By exploiting a stack overflow to create a specially crafted stack frame,
each gadget jumps to the next by popping off the next gadget's address as
a fake return address, allowing non-trivial 'programs' to be executed by
piecing together a large number of such gadgets.

This attack vector relies heavily on the ability to jump to arbitrary
places in the code. If we can limit where a function is able to return
to, it becomes much more difficult to assemble the critical mass of
gadgets needed to mount arbitrary attacks.

So let's try to do so by clearing bit #55 in the stack pointer register
before returning from a function, and setting it again right after a
'bl' or 'blr' instruction. That way, jumping to arbitrary places in the
code and popping the next gadget's address becomes a lot more
complicated, since the stack pointer will not be valid after a function
return until the 'reset' sequence is executed (or an exception is taken).

Signed-off-by: Ard Biesheuvel
---
 arch/arm64/Kconfig                 | 10 ++++++++++
 arch/arm64/include/asm/assembler.h |  9 +++++++++
 arch/arm64/kernel/entry.S          | 18 ++++++++++++++++++
 3 files changed, 37 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 42c090cf0292..4562af0250b9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1011,6 +1011,16 @@ config ARM64_SW_TTBR0_PAN
 	  zeroed area and reserved ASID. The user access routines restore the
 	  valid TTBR0_EL1 temporarily.
 
+config ARM64_ROP_SHIELD
+	bool "Enable basic ROP protection through the stack pointer sign bit"
+	depends on GCC_PLUGINS && VMAP_STACK
+	select GCC_PLUGIN_ARM64_ROP_SHIELD
+	help
+	  Enable protection against ROP attacks by clearing bit #55 in the
+	  stack pointer register across a function return.
+
+	  If paranoid, say Y here. If unsure, say N.
+
 menu "ARMv8.1 architectural features"
 
 config ARM64_HW_AFDBM
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 346ada4de48a..95d3ec98eb58 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -701,12 +701,21 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 .Lyield_out_\@ :
 	.endm
 
+	.macro		unclobber_sp, tmp
+#ifdef CONFIG_ARM64_ROP_SHIELD
+	mov		\tmp, sp
+	orr		sp, \tmp, #(1 << 55)
+#endif
+	.endm
+
 	.macro		bl_c, target
 	bl		\target
+	unclobber_sp	x30
 	.endm
 
 	.macro		blr_c, reg
 	blr		\reg
+	unclobber_sp	x30
 	.endm
 
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index eba5b6b528ea..2adebca74f11 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -95,6 +95,9 @@ alternative_else_nop_endif
 	 */
 	add	sp, sp, x0		// sp' = sp + x0
 	sub	x0, sp, x0		// x0' = sp' - x0 = (sp + x0) - x0 = sp
+#ifdef CONFIG_ARM64_ROP_SHIELD
+	tbz	x0, #55, 1f
+#endif
 	tbnz	x0, #THREAD_SHIFT, 0f
 	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
@@ -129,6 +132,21 @@ alternative_else_nop_endif
 	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
 	sub	sp, sp, x0
 	mrs	x0, tpidrro_el0
+	b	el\()\el\()_\label
+
+#ifdef CONFIG_ARM64_ROP_SHIELD
+1:	/*
+	 * We have to do a little dance here to set bit 55 in the stack
+	 * pointer register without clobbering anything else.
+	 */
+	orr	x0, x0, #(1 << 55)
+	str	x1, [x0]
+	mov	x1, sp
+	mov	sp, x0
+	and	x0, x0, #~(1 << 55)
+	sub	x0, x1, x0
+	ldr	x1, [sp]
+#endif
 #endif
 	b	el\()\el\()_\label
 	.endm
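
P.S. For reviewers who want to picture the compiler side (the GCC plugin
itself is in patch 2/3): below is a rough, hand-written sketch of what an
instrumented call/return pair could look like with CONFIG_ARM64_ROP_SHIELD
enabled. The function name and the choice of x16 as a scratch register are
invented for the example; only the bit #55 clear/set pairing reflects what
this series does.

	caller:
		...
		bl	foo			// x30 := address of the next insn
		mov	x30, sp			// unclobber_sp x30: x30 is dead
		orr	sp, x30, #(1 << 55)	// again here, so reuse it to set
		...				// bit #55 of sp

	foo:
		stp	x29, x30, [sp, #-16]!	// ordinary prologue
		mov	x29, sp
		...
		ldp	x29, x30, [sp], #16	// ordinary epilogue
		mov	x16, sp			// copy sp,
		and	x16, x16, #~(1 << 55)	// clear bit #55 (sp is no longer
		mov	sp, x16			// a valid kernel stack address),
		ret				// and return with sp 'clobbered'

A return that is not followed by the unclobber_sp sequence (i.e. one reached
by jumping into the middle of a function, as a ROP chain would do) leaves sp
pointing at a non-kernel address, so the next stack access faults; the
kernel_ventry code patched above spots the cleared bit and repairs sp so the
resulting exception can be handled normally.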