
[RFC,7/9] arm64: assembler: add unwind annotations to frame push/pop macros

Message ID 20211013152243.2216899-8-ardb@kernel.org (mailing list archive)
State RFC
Series arm64: use unwind data on GCC for shadow call stack

Commit Message

Ard Biesheuvel Oct. 13, 2021, 3:22 p.m. UTC
In order to ensure that we can unwind from hand-rolled assembly
routines, decorate the frame push/pop helper macros used by non-leaf
assembler routines with the appropriate annotations.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/assembler.h | 26 +++++++++++++++++++-
 arch/arm64/include/asm/linkage.h   | 16 +++++++++++-
 2 files changed, 40 insertions(+), 2 deletions(-)
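
As a usage sketch (not part of this patch), a hand-rolled non-leaf routine built
on these helpers looks roughly like the following; the routine name, register
usage and callee are made up for illustration. With this patch applied,
SYM_FUNC_START()/SYM_FUNC_END() additionally emit .cfi_startproc/.cfi_endproc,
and frame_push/frame_pop emit CFI directives describing the saved registers, so
the unwinder can step through the bl:

SYM_FUNC_START(example_helper)			// now also emits .cfi_startproc
	frame_push	2			// save x29/x30 plus x19/x20, CFI-annotated
	mov		x19, x0			// stash the arguments in callee-saved registers
	mov		x20, x1
	bl		some_other_function	// hypothetical callee; clobbers x30
	add		x0, x19, x20		// use the preserved values after the call
	frame_pop				// restore the registers and rewind the CFI state
	ret
SYM_FUNC_END(example_helper)			// now also emits .cfi_endproc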

Patch

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ceed84ac4005..cebb6c8c489b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -664,9 +664,10 @@  alternative_endif
 	 *              the new value of sp. Add @extra bytes of stack space
 	 *              for locals.
 	 */
-	.macro		frame_push, regcount:req, extra
+	.macro		frame_push, regcount:req, extra=0
 #ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
 	paciasp
+	.cfi_negate_ra_state
 #endif
 	__frame		st, \regcount, \extra
 	.endm
@@ -681,14 +682,29 @@  alternative_endif
 	__frame		ld
 #ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
 	autiasp
+	.cfi_negate_ra_state
 #endif
 	.endm
 
 	.macro		__frame_regs, reg1, reg2, op, num
 	.if		.Lframe_regcount == \num
 	\op\()r		\reg1, [sp, #(\num + 1) * 8]
+	.ifc		\op, st
+	.cfi_offset	\reg1, -.Lframe_cfa_offset
+	.set		.Lframe_cfa_offset, .Lframe_cfa_offset - 8
+	.else
+	.cfi_restore	\reg1
+	.endif
 	.elseif		.Lframe_regcount > \num
 	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
+	.ifc		\op, st
+	.cfi_offset	\reg1, -.Lframe_cfa_offset
+	.cfi_offset	\reg2, -.Lframe_cfa_offset + 8
+	.set		.Lframe_cfa_offset, .Lframe_cfa_offset - 16
+	.else
+	.cfi_restore	\reg1
+	.cfi_restore	\reg2
+	.endif
 	.endif
 	.endm
 
@@ -708,7 +724,12 @@  alternative_endif
 	.set		.Lframe_regcount, \regcount
 	.set		.Lframe_extra, \extra
 	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
+	.set		.Lframe_cfa_offset, .Lframe_local_offset + .Lframe_extra
 	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
+	.cfi_def_cfa_offset .Lframe_cfa_offset
+	.cfi_offset	x29, -.Lframe_cfa_offset
+	.cfi_offset	x30, -.Lframe_cfa_offset + 8
+	.set		.Lframe_cfa_offset, .Lframe_cfa_offset - 16
 	mov		x29, sp
 	.endif
 
@@ -723,6 +744,9 @@  alternative_endif
 	.error		"frame_push/frame_pop may not be nested"
 	.endif
 	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
+	.cfi_restore	x29
+	.cfi_restore	x30
+	.cfi_def_cfa_offset 0
 	.set		.Lframe_regcount, -1
 	.endif
 	.endm
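
To make the CFA bookkeeping above concrete, here is an approximate hand
expansion of frame_push 2, 0 and the matching frame_pop, assuming
CONFIG_ARM64_PTR_AUTH_KERNEL=y. The x19/x20 accesses come from the existing
__frame_regs invocations, which are only partly visible in the hunks above, so
treat this as a sketch rather than verbatim assembler output. All .cfi_offset
values are relative to the CFA, i.e. the value of sp on entry:

	// frame_push 2, 0: .Lframe_local_offset = ((2 + 3) / 2) * 16 = 32
	paciasp
	.cfi_negate_ra_state			// x30 now holds a signed return address
	stp	x29, x30, [sp, #-32]!
	.cfi_def_cfa_offset 32			// CFA = sp + 32, the original sp
	.cfi_offset x29, -32			// x29 saved at CFA - 32
	.cfi_offset x30, -24			// x30 saved at CFA - 24
	mov	x29, sp
	stp	x19, x20, [sp, #16]		// __frame_regs x19, x20, st, 1
	.cfi_offset x19, -16			// x19 saved at CFA - 16
	.cfi_offset x20, -8			// x20 saved at CFA - 8

	// frame_pop: the unwind state is wound back in the reverse direction
	ldp	x19, x20, [sp, #16]
	.cfi_restore x19
	.cfi_restore x20
	ldp	x29, x30, [sp], #32
	.cfi_restore x29
	.cfi_restore x30
	.cfi_def_cfa_offset 0			// sp is back at its entry value
	autiasp
	.cfi_negate_ra_state			// x30 is a plain return address again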
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 9906541a6861..d984a6750b01 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -4,6 +4,9 @@ 
 #define __ALIGN		.align 2
 #define __ALIGN_STR	".align 2"
 
+#define SYM_FUNC_CFI_START	.cfi_startproc ;
+#define SYM_FUNC_CFI_END	.cfi_endproc ;
+
 #if defined(CONFIG_ARM64_BTI_KERNEL) && defined(__aarch64__)
 
 /*
@@ -12,6 +15,9 @@ 
  * instead.
  */
 #define BTI_C hint 34 ;
+#else
+#define BTI_C
+#endif
 
 /*
  * When using in-kernel BTI we need to ensure that PCS-conformant assembly
@@ -20,29 +26,37 @@ 
  */
 #define SYM_FUNC_START(name)				\
 	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)	\
+	SYM_FUNC_CFI_START				\
 	BTI_C
 
 #define SYM_FUNC_START_NOALIGN(name)			\
 	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)	\
+	SYM_FUNC_CFI_START				\
 	BTI_C
 
 #define SYM_FUNC_START_LOCAL(name)			\
 	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)	\
+	SYM_FUNC_CFI_START				\
 	BTI_C
 
 #define SYM_FUNC_START_LOCAL_NOALIGN(name)		\
 	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)	\
+	SYM_FUNC_CFI_START				\
 	BTI_C
 
 #define SYM_FUNC_START_WEAK(name)			\
 	SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)	\
+	SYM_FUNC_CFI_START				\
 	BTI_C
 
 #define SYM_FUNC_START_WEAK_NOALIGN(name)		\
 	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)		\
+	SYM_FUNC_CFI_START				\
 	BTI_C
 
-#endif
+#define SYM_FUNC_END(name)				\
+	SYM_FUNC_CFI_END				\
+	SYM_END(name, SYM_T_FUNC)
 
 /*
  * Annotate a function as position independent, i.e., safe to be called before