diff mbox series

[v3,4/4] arm64: ftrace: Add return address protection

Message ID 20221209152048.3517080-5-ardb@kernel.org (mailing list archive)
State New, archived
Headers show
Series arm64: Add return address protection to asm code | expand

Commit Message

Ard Biesheuvel Dec. 9, 2022, 3:20 p.m. UTC
Use the newly added asm macros to protect and restore the return address
in the ftrace call wrappers, based on whichever methods are active (PAC
and/or shadow call stack).

If the graph tracer is in use, this covers both the return address *to*
the ftrace call site and the return address *at* the call site, and the
latter will either be restored in return_to_handler(), or before
returning to the call site.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/entry-ftrace.S | 17 ++++++++++++++++-
 arch/arm64/kernel/stacktrace.c   |  1 +
 2 files changed, 17 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index bccd525241ab615d..4acfe12ac594da58 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -33,9 +33,13 @@ 
  * record, its caller is missing from the LR and existing chain of frame
  * records.
  */
+
 SYM_CODE_START(ftrace_caller)
 	bti	c
 
+	protect_return_address x9
+	protect_return_address x30
+
 	/* Save original SP */
 	mov	x10, sp
 
@@ -65,6 +69,9 @@  SYM_CODE_START(ftrace_caller)
 	stp	x29, x30, [sp, #FREGS_SIZE]
 	add	x29, sp, #FREGS_SIZE
 
+	alternative_insn  nop, "xpaci x30", ARM64_HAS_ADDRESS_AUTH, \
+					IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)
+
 	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
 	mov	x1, x9				// parent_ip (callsite's LR)
 	ldr_l	x2, function_trace_op		// op
@@ -93,7 +100,14 @@  SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	/* Restore the callsite's SP */
 	add	sp, sp, #FREGS_SIZE + 32
 
-	ret	x10
+	restore_return_address x10
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	// Check whether the callsite's LR has been overridden
+	cmp	x9, x30
+	b.ne	0f
+#endif
+	restore_return_address x30
+0:	ret	x10
 SYM_CODE_END(ftrace_caller)
 
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
@@ -265,6 +279,7 @@  SYM_CODE_START(return_to_handler)
 	ldp x6, x7, [sp, #48]
 	add sp, sp, #64
 
+	restore_return_address x30
 	ret
 SYM_CODE_END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 634279b3b03d1b07..e323a8ac50168261 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -102,6 +102,7 @@  static int notrace unwind_next(struct unwind_state *state)
 		 */
 		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
 						(void *)state->fp);
+		orig_pc = ptrauth_strip_insn_pac(orig_pc);
 		if (WARN_ON_ONCE(state->pc == orig_pc))
 			return -EINVAL;
 		state->pc = orig_pc;