From patchwork Thu Aug 2 13:21:30 2018
X-Patchwork-Submitter: Ard Biesheuvel
X-Patchwork-Id: 10553615
From: Ard Biesheuvel
To: kernel-hardening@lists.openwall.com
Cc: keescook@chromium.org, christoffer.dall@arm.com, will.deacon@arm.com,
 catalin.marinas@arm.com, mark.rutland@arm.com, labbott@fedoraproject.org,
 linux-arm-kernel@lists.infradead.org, Ard Biesheuvel
Subject: [RFC/PoC PATCH 1/3] arm64: use wrapper macro for bl/blx instructions from asm code
Date: Thu, 2 Aug 2018 15:21:30 +0200
Message-Id: <20180802132133.23999-2-ard.biesheuvel@linaro.org>
In-Reply-To: <20180802132133.23999-1-ard.biesheuvel@linaro.org>
References: <20180802132133.23999-1-ard.biesheuvel@linaro.org>

In preparation of enabling a feature that temporarily clears the sign
bit in the stack pointer register
across a subroutine return, switch to bl_c/blr_c macros for making such calls from assembler source. They will be updated in a subsequent patch to conditionally incorporate the restore sequence for the stack pointer register. Signed-off-by: Ard Biesheuvel --- arch/arm64/include/asm/assembler.h | 12 ++- arch/arm64/kernel/entry-ftrace.S | 6 +- arch/arm64/kernel/entry.S | 86 ++++++++++---------- arch/arm64/kernel/head.S | 4 +- arch/arm64/kernel/probes/kprobes_trampoline.S | 2 +- arch/arm64/kernel/sleep.S | 6 +- 6 files changed, 62 insertions(+), 54 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0bcc98dbba56..346ada4de48a 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -687,8 +687,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm .macro do_cond_yield_neon - bl kernel_neon_end - bl kernel_neon_begin + bl_c kernel_neon_end + bl_c kernel_neon_begin .endm .macro endif_yield_neon, lbl @@ -701,4 +701,12 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .Lyield_out_\@ : .endm + .macro bl_c, target + bl \target + .endm + + .macro blr_c, reg + blr \reg + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 1175f5827ae1..4691eef0dc65 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -106,7 +106,7 @@ ENTRY(_mcount) mcount_get_pc x0 // function's pc mcount_get_lr x1 // function's lr (= parent's pc) - blr x2 // (*ftrace_trace_function)(pc, lr); + blr_c x2 // (*ftrace_trace_function)(pc, lr); skip_ftrace_call: // } #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -200,7 +200,7 @@ ENTRY(ftrace_graph_caller) mcount_get_lr_addr x0 // pointer to function's saved lr mcount_get_pc x1 // function's pc mcount_get_parent_fp x2 // parent's fp - bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp) + bl_c prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp) mcount_exit ENDPROC(ftrace_graph_caller) @@ -215,7 +215,7 @@ ENDPROC(ftrace_graph_caller) ENTRY(return_to_handler) save_return_regs mov x0, x29 // parent's fp - bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp); + bl_c ftrace_return_to_handler// addr = ftrace_return_to_hander(fp); mov x30, x0 // restore the original return address restore_return_regs ret diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 28ad8799406f..eba5b6b528ea 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -43,7 +43,7 @@ */ .macro ct_user_exit, syscall = 0 #ifdef CONFIG_CONTEXT_TRACKING - bl context_tracking_user_exit + bl_c context_tracking_user_exit .if \syscall == 1 /* * Save/restore needed during syscalls. Restore syscall arguments from @@ -59,7 +59,7 @@ .macro ct_user_enter #ifdef CONFIG_CONTEXT_TRACKING - bl context_tracking_user_enter + bl_c context_tracking_user_enter #endif .endm @@ -305,7 +305,7 @@ alternative_else_nop_endif * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache * corruption). 
*/ - bl post_ttbr_update_workaround + bl_c post_ttbr_update_workaround .endif 1: .if \el != 0 @@ -425,7 +425,7 @@ tsk .req x28 // current thread_info ldr_l x1, handle_arch_irq mov x0, sp irq_stack_entry - blr x1 + blr_c x1 irq_stack_exit .endm @@ -490,7 +490,7 @@ __bad_stack: mov x0, sp /* Time to die */ - bl handle_bad_stack + bl_c handle_bad_stack ASM_BUG() #endif /* CONFIG_VMAP_STACK */ @@ -502,7 +502,7 @@ __bad_stack: mov x0, sp mov x1, #\reason mrs x2, esr_el1 - bl bad_mode + bl_c bad_mode ASM_BUG() .endm @@ -580,7 +580,7 @@ el1_da: inherit_daif pstate=x23, tmp=x2 clear_address_tag x0, x3 mov x2, sp // struct pt_regs - bl do_mem_abort + bl_c do_mem_abort kernel_exit 1 el1_sp_pc: @@ -590,7 +590,7 @@ el1_sp_pc: mrs x0, far_el1 inherit_daif pstate=x23, tmp=x2 mov x2, sp - bl do_sp_pc_abort + bl_c do_sp_pc_abort ASM_BUG() el1_undef: /* @@ -598,7 +598,7 @@ el1_undef: */ inherit_daif pstate=x23, tmp=x2 mov x0, sp - bl do_undefinstr + bl_c do_undefinstr ASM_BUG() el1_dbg: /* @@ -609,7 +609,7 @@ el1_dbg: tbz x24, #0, el1_inv // EL1 only mrs x0, far_el1 mov x2, sp // struct pt_regs - bl do_debug_exception + bl_c do_debug_exception kernel_exit 1 el1_inv: // TODO: add support for undefined instructions in kernel mode @@ -617,7 +617,7 @@ el1_inv: mov x0, sp mov x2, x1 mov x1, #BAD_SYNC - bl bad_mode + bl_c bad_mode ASM_BUG() ENDPROC(el1_sync) @@ -626,7 +626,7 @@ el1_irq: kernel_entry 1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off + bl_c trace_hardirqs_off #endif irq_handler @@ -636,11 +636,11 @@ el1_irq: cbnz w24, 1f // preempt count != 0 ldr x0, [tsk, #TSK_TI_FLAGS] // get flags tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? - bl el1_preempt + bl_c el1_preempt 1: #endif #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on + bl_c trace_hardirqs_on #endif kernel_exit 1 ENDPROC(el1_irq) @@ -648,7 +648,7 @@ ENDPROC(el1_irq) #ifdef CONFIG_PREEMPT el1_preempt: mov x24, lr -1: bl preempt_schedule_irq // irq en/disable is done inside +1: bl_c preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? 
ret x24 @@ -749,7 +749,7 @@ el0_da: clear_address_tag x0, x26 mov x1, x25 mov x2, sp - bl do_mem_abort + bl_c do_mem_abort b ret_to_user el0_ia: /* @@ -758,13 +758,13 @@ el0_ia: mrs x26, far_el1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off + bl_c trace_hardirqs_off #endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp - bl do_el0_ia_bp_hardening + bl_c do_el0_ia_bp_hardening b ret_to_user el0_fpsimd_acc: /* @@ -774,7 +774,7 @@ el0_fpsimd_acc: ct_user_exit mov x0, x25 mov x1, sp - bl do_fpsimd_acc + bl_c do_fpsimd_acc b ret_to_user el0_sve_acc: /* @@ -784,7 +784,7 @@ el0_sve_acc: ct_user_exit mov x0, x25 mov x1, sp - bl do_sve_acc + bl_c do_sve_acc b ret_to_user el0_fpsimd_exc: /* @@ -794,7 +794,7 @@ el0_fpsimd_exc: ct_user_exit mov x0, x25 mov x1, sp - bl do_fpsimd_exc + bl_c do_fpsimd_exc b ret_to_user el0_sp_pc: /* @@ -803,13 +803,13 @@ el0_sp_pc: mrs x26, far_el1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off + bl_c trace_hardirqs_off #endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp - bl do_sp_pc_abort + bl_c do_sp_pc_abort b ret_to_user el0_undef: /* @@ -818,7 +818,7 @@ el0_undef: enable_daif ct_user_exit mov x0, sp - bl do_undefinstr + bl_c do_undefinstr b ret_to_user el0_sys: /* @@ -828,7 +828,7 @@ el0_sys: ct_user_exit mov x0, x25 mov x1, sp - bl do_sysinstr + bl_c do_sysinstr b ret_to_user el0_dbg: /* @@ -838,7 +838,7 @@ el0_dbg: mrs x0, far_el1 mov x1, x25 mov x2, sp - bl do_debug_exception + bl_c do_debug_exception enable_daif ct_user_exit b ret_to_user @@ -848,7 +848,7 @@ el0_inv: mov x0, sp mov x1, #BAD_SYNC mov x2, x25 - bl bad_el0_sync + bl_c bad_el0_sync b ret_to_user ENDPROC(el0_sync) @@ -858,19 +858,19 @@ el0_irq: el0_irq_naked: enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off + bl_c trace_hardirqs_off #endif ct_user_exit #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR tbz x22, #55, 1f - bl do_el0_irq_bp_hardening + bl_c do_el0_irq_bp_hardening 1: #endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on + bl_c trace_hardirqs_on #endif b ret_to_user ENDPROC(el0_irq) @@ -880,7 +880,7 @@ el1_error: mrs x1, esr_el1 enable_dbg mov x0, sp - bl do_serror + bl_c do_serror kernel_exit 1 ENDPROC(el1_error) @@ -890,7 +890,7 @@ el0_error_naked: mrs x1, esr_el1 enable_dbg mov x0, sp - bl do_serror + bl_c do_serror enable_daif ct_user_exit b ret_to_user @@ -920,9 +920,9 @@ ret_fast_syscall_trace: */ work_pending: mov x0, sp // 'regs' - bl do_notify_resume + bl_c do_notify_resume #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on // enabled while in userspace + bl_c trace_hardirqs_on // enabled while in userspace #endif ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step b finish_ret_to_user @@ -980,11 +980,11 @@ el0_svc_naked: // compat entry point b.hs ni_sys mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number ldr x16, [stbl, xscno, lsl #3] // address in the syscall table - blr x16 // call sys_* routine + blr_c x16 // call sys_* routine b ret_fast_syscall ni_sys: mov x0, sp - bl do_ni_syscall + bl_c do_ni_syscall b ret_fast_syscall ENDPROC(el0_svc) @@ -998,7 +998,7 @@ __sys_trace: mov x0, #-ENOSYS // set default errno if so str x0, [sp, #S_X0] 1: mov x0, sp - bl syscall_trace_enter + bl_c syscall_trace_enter cmp w0, #NO_SYSCALL // skip the syscall? 
b.eq __sys_trace_return_skipped mov wscno, w0 // syscall number (possibly new) @@ -1010,18 +1010,18 @@ __sys_trace: ldp x4, x5, [sp, #S_X4] ldp x6, x7, [sp, #S_X6] ldr x16, [stbl, xscno, lsl #3] // address in the syscall table - blr x16 // call sys_* routine + blr_c x16 // call sys_* routine __sys_trace_return: str x0, [sp, #S_X0] // save returned x0 __sys_trace_return_skipped: mov x0, sp - bl syscall_trace_exit + bl_c syscall_trace_exit b ret_to_user __ni_sys_trace: mov x0, sp - bl do_ni_syscall + bl_c do_ni_syscall b __sys_trace_return .popsection // .entry.text @@ -1182,10 +1182,10 @@ NOKPROBE(cpu_switch_to) * This is how we return from a fork. */ ENTRY(ret_from_fork) - bl schedule_tail + bl_c schedule_tail cbz x19, 1f // not a kernel thread mov x0, x20 - blr x19 + blr_c x19 1: get_thread_info tsk b ret_to_user ENDPROC(ret_from_fork) @@ -1337,7 +1337,7 @@ ENTRY(__sdei_asm_handler) add x0, x19, #SDEI_EVENT_INTREGS mov x1, x19 - bl __sdei_handler + bl_c __sdei_handler msr sp_el0, x28 /* restore regs >x17 that we clobbered */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index b0853069702f..10414bbbeecb 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -432,13 +432,13 @@ __primary_switched: dsb ishst // Make zero page visible to PTW #ifdef CONFIG_KASAN - bl kasan_early_init + bl_c kasan_early_init #endif #ifdef CONFIG_RANDOMIZE_BASE tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? b.ne 0f mov x0, x21 // pass FDT address in x0 - bl kaslr_early_init // parse FDT for KASLR options + bl_c kaslr_early_init // parse FDT for KASLR options cbz x0, 0f // KASLR disabled? just proceed orr x23, x23, x0 // record KASLR offset ldp x29, x30, [sp], #16 // we must enable KASLR, return diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S index 45dce03aaeaf..0b195b727dc7 100644 --- a/arch/arm64/kernel/probes/kprobes_trampoline.S +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S @@ -67,7 +67,7 @@ ENTRY(kretprobe_trampoline) save_all_base_regs mov x0, sp - bl trampoline_probe_handler + bl_c trampoline_probe_handler /* * Replace trampoline address in lr with actual orig_ret_addr return * address. diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index bebec8ef9372..6ced3a8bb528 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -90,7 +90,7 @@ ENTRY(__cpu_suspend_enter) str x0, [x1] add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS stp x29, lr, [sp, #-16]! 
-	bl	cpu_do_suspend
+	bl_c	cpu_do_suspend
 	ldp	x29, lr, [sp], #16
 	mov	x0, #1
 	ret
@@ -129,11 +129,11 @@ ENTRY(_cpu_resume)
 	/*
 	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
-	bl	cpu_do_resume
+	bl_c	cpu_do_resume
 
 #ifdef CONFIG_KASAN
 	mov	x0, sp
-	bl	kasan_unpoison_task_stack_below
+	bl_c	kasan_unpoison_task_stack_below
 #endif
 
 	ldp	x19, x20, [x29, #16]
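For illustration, a converted call site looks like the following minimal
sketch (not part of the series; the callee name do_something is made up).
Until patch 3 lands, bl_c expands to a plain bl, so the generated code is
unchanged and the conversion is purely mechanical:

	/* before: a regular call from assembler */
	mov	x0, sp
	bl	do_something

	/* after: same code emitted, but the call now goes through a hook point */
	mov	x0, sp
	bl_c	do_something		// expands to plain 'bl' until patch 3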
From patchwork Thu Aug 2 13:21:31 2018
X-Patchwork-Submitter: Ard Biesheuvel
X-Patchwork-Id: 10553619
From: Ard Biesheuvel
To: kernel-hardening@lists.openwall.com
Cc: keescook@chromium.org, christoffer.dall@arm.com, will.deacon@arm.com,
 catalin.marinas@arm.com, mark.rutland@arm.com, labbott@fedoraproject.org,
 linux-arm-kernel@lists.infradead.org, Ard Biesheuvel
Subject: [RFC/PoC PATCH 2/3] gcc: plugins: add ROP shield plugin for arm64
Date: Thu, 2 Aug 2018 15:21:31 +0200
Message-Id: <20180802132133.23999-3-ard.biesheuvel@linaro.org>
In-Reply-To: <20180802132133.23999-1-ard.biesheuvel@linaro.org>
References: <20180802132133.23999-1-ard.biesheuvel@linaro.org>

Add a plugin that mangles every 'ret' instruction so bit 55 in the
stack pointer register is cleared first, and every 'bl' or 'blr'
instruction so that the bit is set again right after the call returns.
This should make it very difficult for ROP attacks to be staged, given
that the supply of gadgets is now reduced to those that start with the
'reset bit #55' sequence, which only occurs right after a function
return when all caller save registers are dead.

Signed-off-by: Ard Biesheuvel
---
 arch/Kconfig                                  |   4 +
 drivers/firmware/efi/libstub/Makefile         |   3 +-
 scripts/Makefile.gcc-plugins                  |   7 ++
 scripts/gcc-plugins/arm64_rop_shield_plugin.c | 116 ++++++++++++++++++++
 4 files changed, 129 insertions(+), 1 deletion(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 1aa59063f1fd..d61d1249d986 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -549,6 +549,10 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
 	  in structures. This reduces the performance hit of RANDSTRUCT
 	  at the cost of weakened randomization.
 
+config GCC_PLUGIN_ARM64_ROP_SHIELD
+	bool
+	depends on GCC_PLUGINS && ARM64
+
 config HAVE_STACKPROTECTOR
 	bool
 	help
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index a34e9290a699..9b1510e5957f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -20,7 +20,8 @@ cflags-$(CONFIG_EFI_ARMSTUB)	+= -I$(srctree)/scripts/dtc/libfdt
 KBUILD_CFLAGS			:= $(cflags-y) -DDISABLE_BRANCH_PROFILING \
 				   -D__NO_FORTIFY \
 				   $(call cc-option,-ffreestanding) \
-				   $(call cc-option,-fno-stack-protector)
+				   $(call cc-option,-fno-stack-protector) \
+				   $(DISABLE_ARM64_ROP_SHIELD_PLUGIN)
 
 GCOV_PROFILE			:= n
 KASAN_SANITIZE			:= n
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index c961b9a65d11..f4f9c27fb3a0 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -17,10 +17,17 @@ gcc-plugin-$(CONFIG_GCC_PLUGIN_RANDSTRUCT)	+= randomize_layout_plugin.so
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT)	+= -DRANDSTRUCT_PLUGIN
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE)	+= -fplugin-arg-randomize_layout_plugin-performance-mode
 
+ifdef CONFIG_GCC_PLUGIN_ARM64_ROP_SHIELD
+gcc-plugin-y += arm64_rop_shield_plugin.so
+gcc-plugin-cflags-y += -DARM64_ROP_SHIELD_PLUGIN
+DISABLE_ARM64_ROP_SHIELD_PLUGIN += -fplugin-arg-arm64_rop_shield_plugin-disable
+endif
+
 GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
 
 export GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
 export DISABLE_LATENT_ENTROPY_PLUGIN
+export DISABLE_ARM64_ROP_SHIELD_PLUGIN
 
 # sancov_plugin.so can be only in CFLAGS_KCOV because avoid duplication.
 GCC_PLUGINS_CFLAGS := $(filter-out %/sancov_plugin.so, $(GCC_PLUGINS_CFLAGS))
diff --git a/scripts/gcc-plugins/arm64_rop_shield_plugin.c b/scripts/gcc-plugins/arm64_rop_shield_plugin.c
new file mode 100644
index 000000000000..e87f4a9b9ab0
--- /dev/null
+++ b/scripts/gcc-plugins/arm64_rop_shield_plugin.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Ard Biesheuvel
+ */
+
+#include "gcc-common.h"
+
+__visible int plugin_is_GPL_compatible;
+
+static unsigned int arm64_rop_shield_execute(void)
+{
+	rtx_insn *insn;
+	rtx body, x, y;
+
+	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
+		if (JUMP_P(insn)) {
+			body = PATTERN(insn);
+
+			if (GET_CODE(body) != RETURN)
+				continue;
+
+			x = gen_rtx_ASM_OPERANDS(VOIDmode,
+						 "mov x16, sp \n\t"
+						 "and sp, x16, #~(1 << 55)",
+						 "",
+						 0,
+						 rtvec_alloc(0),
+						 rtvec_alloc(0),
+						 rtvec_alloc(0),
+						 UNKNOWN_LOCATION);
+			MEM_VOLATILE_P(x) = true;
+
+			/*
+			 * According to the AAPCS spec, x16 may only be used by
+			 * subroutine calls that are exposed via a jump/call
+			 * ELF relocation, and so the compiler may assume it is
+			 * preserved across a call to a function in the same
+			 * compilation unit. So mark x16 as clobbered
+			 * explicitly.
+			 */
+			y = gen_rtx_CLOBBER(VOIDmode, gen_rtx_REG(Pmode, 16));
+
+			emit_insn_before(gen_rtx_PARALLEL(VOIDmode,
+							  gen_rtvec(2, x, y)),
+					 insn);
+		}
+
+		if (CALL_P(insn)) {
+			rtx_insn *next;
+
+			/*
+			 * We can use x30 here without marking it as clobbered.
+			 * The bl instruction already clobbers it, and whether
+			 * we returned here via a plain 'ret' instruction or via
+			 * some other way is unspecified, so it is no longer
+			 * live when we get here.
+			 */
+			x = gen_rtx_ASM_OPERANDS(VOIDmode,
+						 "mov x30, sp \n\t"
+						 "orr sp, x30, #(1 << 55)",
+						 "",
+						 0,
+						 rtvec_alloc(0),
+						 rtvec_alloc(0),
+						 rtvec_alloc(0),
+						 UNKNOWN_LOCATION);
+			MEM_VOLATILE_P(x) = true;
+
+			next = NEXT_INSN(insn);
+			if (NOTE_P(next))
+				insn = next;
+
+			emit_insn_after(x, insn);
+		}
+	}
+	return 0;
+}
+
+#define PASS_NAME arm64_rop_shield
+
+#define NO_GATE
+#define TODO_FLAGS_FINISH TODO_dump_func
+#include "gcc-generate-rtl-pass.h"
+
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+			  struct plugin_gcc_version *version)
+{
+	const struct plugin_argument *argv = plugin_info->argv;
+	int argc = plugin_info->argc;
+	bool enable = true;
+	int i;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	PASS_INFO(arm64_rop_shield, "shorten", 1, PASS_POS_INSERT_BEFORE);
+
+	for (i = 0; i < argc; i++) {
+		if (!strcmp(argv[i].key, "disable")) {
+			enable = false;
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"),
+		      plugin_info->base_name, argv[i].key);
+	}
+
+	if (!enable)
+		return 0;
+
+	register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
+			  NULL, &arm64_rop_shield_pass_info);
+
+	return 0;
+}
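Roughly, the pass leaves compiled code looking like the sketch below. It
is hand-written for illustration rather than actual GCC output, and
example_func/other_func are made-up names; the two inserted sequences are
the asm strings emitted by arm64_rop_shield_execute() above:

	example_func:
		stp	x29, x30, [sp, #-16]!
		mov	x29, sp
		bl	other_func		// original call emitted by GCC
		mov	x30, sp			// inserted after the call:
		orr	sp, x30, #(1 << 55)	//   set bit 55 again (x30 is dead here)
		ldp	x29, x30, [sp], #16
		mov	x16, sp			// inserted before the return:
		and	sp, x16, #~(1 << 55)	//   clear bit 55 (x16 marked clobbered)
		ret

Between the callee's 'and' and the caller's 'orr', the stack pointer is
not a valid kernel address, so a gadget that returns into the middle of a
function faults on its first stack access.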
From patchwork Thu Aug 2 13:21:32 2018
X-Patchwork-Submitter: Ard Biesheuvel
X-Patchwork-Id: 10553621
From: Ard Biesheuvel
To: kernel-hardening@lists.openwall.com
Cc: keescook@chromium.org, christoffer.dall@arm.com, will.deacon@arm.com,
 catalin.marinas@arm.com, mark.rutland@arm.com, labbott@fedoraproject.org,
 linux-arm-kernel@lists.infradead.org, Ard Biesheuvel
Subject: [RFC/PoC PATCH 3/3] arm64: enable ROP protection by clearing SP bit #55 across function returns
Date: Thu, 2 Aug 2018 15:21:32 +0200
Message-Id: <20180802132133.23999-4-ard.biesheuvel@linaro.org>
In-Reply-To: <20180802132133.23999-1-ard.biesheuvel@linaro.org>
References: <20180802132133.23999-1-ard.biesheuvel@linaro.org>

ROP attacks rely on a large supply of so-called 'gadgets', which are (in
this context) short sequences of instructions ending in a stack pop and
a return instruction. By exploiting a stack overflow to create a
specially crafted stack frame, each gadget jumps to the next by popping
off the next gadget's address as a fake return address, allowing
non-trivial 'programs' to be executed by piecing together a large number
of such gadgets.

This attack vector relies heavily on the ability to jump to arbitrary
places in the code. If we could limit where a function could return to,
it would be much more difficult to obtain critical mass in terms of a
gadget collection that allows arbitrary attacks to be mounted.
So let's try and do so by clearing bit #55 in the stack pointer register before returning from a function, and setting it again right after a 'bl' or 'blr' instruction. That way, jumping to arbitrary places in the code and popping the next gadget's address becomes a lot more complicated, since the stack pointer will not be valid after a function return until the 'reset' sequence is executed (or after an exception is taken). Signed-off-by: Ard Biesheuvel --- arch/arm64/Kconfig | 10 ++++++++++ arch/arm64/include/asm/assembler.h | 9 +++++++++ arch/arm64/kernel/entry.S | 18 ++++++++++++++++++ 3 files changed, 37 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 42c090cf0292..4562af0250b9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1011,6 +1011,16 @@ config ARM64_SW_TTBR0_PAN zeroed area and reserved ASID. The user access routines restore the valid TTBR0_EL1 temporarily. +config ARM64_ROP_SHIELD + bool "Enable basic ROP protection through the stack pointer sign bit" + depends on GCC_PLUGINS && VMAP_STACK + select GCC_PLUGIN_ARM64_ROP_SHIELD + help + Enable protection against ROP attacks by clearing bit #55 in the + stack pointer register across a function return. + + If paranoid, say Y here. If unsure, say N. + menu "ARMv8.1 architectural features" config ARM64_HW_AFDBM diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 346ada4de48a..95d3ec98eb58 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -701,12 +701,21 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .Lyield_out_\@ : .endm + .macro unclobber_sp, tmp +#ifdef CONFIG_ARM64_ROP_SHIELD + mov \tmp, sp + orr sp, \tmp, #(1 << 55) +#endif + .endm + .macro bl_c, target bl \target + unclobber_sp x30 .endm .macro blr_c, reg blr \reg + unclobber_sp x30 .endm #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index eba5b6b528ea..2adebca74f11 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -95,6 +95,9 @@ alternative_else_nop_endif */ add sp, sp, x0 // sp' = sp + x0 sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp +#ifdef CONFIG_ARM64_ROP_SHIELD + tbz x0, #55, 1f +#endif tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp @@ -129,6 +132,21 @@ alternative_else_nop_endif /* We were already on the overflow stack. Restore sp/x0 and carry on. */ sub sp, sp, x0 mrs x0, tpidrro_el0 + b el\()\el\()_\label + +#ifdef CONFIG_ARM64_ROP_SHIELD +1: /* + * We have to do a little dance here to set bit 55 in the stack + * pointer register without clobbering anything else. + */ + orr x0, x0, #(1 << 55) + str x1, [x0] + mov x1, sp + mov sp, x0 + and x0, x0, #~(1 << 55) + sub x0, x1, x0 + ldr x1, [sp] +#endif #endif b el\()\el\()_\label .endm
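Taken together with patch 1, a bl_c call site in kernel assembler now
expands to roughly the following with CONFIG_ARM64_ROP_SHIELD=y (a sketch
based on the unclobber_sp macro above; 'foo' is a placeholder callee):

	bl_c	foo
	// ... which the assembler expands to:
	bl	foo			// the callee clears SP bit 55 just before its 'ret'
	mov	x30, sp			// unclobber_sp x30
	orr	sp, x30, #(1 << 55)	// SP is a valid kernel address again; x30 is dead here

Until the orr executes, any stack access through sp would fault, which is
why the entry.S changes above include a fix-up for the case where an
exception is taken while bit 55 is still clear.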