
ARM: vfp: avoid unbalanced stack on 'success' return path

Message ID 20230506161325.223362-1-ardb@kernel.org (mailing list archive)
State New, archived

Commit Message

Ard Biesheuvel May 6, 2023, 4:13 p.m. UTC
Commit c76c6c4ecbec0deb5 ("ARM: 9294/2: vfp: Fix broken softirq handling
with instrumentation enabled") updated the VFP exception entry logic to
go via a C function, so that we get the compiler's version of
local_bh_disable(), which may be instrumented and is therefore not
generally callable from assembler.

However, this assumes that passing an alternative 'success' return
address works in C as it does in asm, and that is only the case if the C
calls in question are tail calls; otherwise, the stack needs some
unwinding as well before that return address can be used.
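
To illustrate the failure mode (a simplified sketch, not the actual
kernel code; the register carrying the 'success' address is only an
assumption for the example): when the C function is compiled with a real
stack frame rather than making a tail call, e.g. with the frame pointer
unwinder enabled, a return taken directly from the asm callee via the
alternative address skips the C epilogue, so that frame is never popped:

	@ sketch only: asm hands the 'success' address to C, C calls back into asm
	vfp_entry:				@ C, as the compiler may emit it
		push	{fp, lr}		@ real frame: SP -= 8
		bl	local_bh_disable
		bl	vfp_support_entry	@ not a tail call here
		pop	{fp, pc}		@ never reached on the 'success' path

	vfp_support_entry:			@ asm, 'success' path
		...
		mov	lr, r3			@ LR = alternative 'success' address
		bx	lr			@ bypasses vfp_entry's epilogue, so
						@ SP is left 8 bytes too low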

I have already sent patches to the list that replace most of the asm
logic with C code, so it is preferable to have a minimal fix that
addresses the issue and can be backported from v6.4 to v6.3 along with
the commit that it fixes. Hopefully, we can land the C conversion for v6.5.

So instead of passing the 'success' return address as a function
argument, pass the stack address from which it should be popped, so that
both LR and SP end up with the expected values.
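
Annotated from the hunks below (no new logic, just the resulting stack
layout): do_vfp now stacks two words and passes a pointer to the slot
holding the 'success' address, and the paths in vfphw.S that return via
that address restore SP from the pointer before popping LR, so SP ends
up back at its value on entry to do_vfp:

	@ in do_vfp, before calling vfp_entry:
		str	lr, [sp, #-8]!		@ [sp]     = original LR (normal return)
		add	r3, sp, #4
		str	r9, [r3]		@ [sp + 4] = 'success' return address
						@ r3, passed to C, points at that slot
	@ on the 'success' paths in vfphw.S:
		mov	sp, r3			@ SP -> the 'success' slot
		pop	{lr}			@ LR = 'success' address; SP is now
						@ back at its value on entry to do_vfp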

Fixes: c76c6c4ecbec0deb5 ("ARM: 9294/2: vfp: Fix broken softirq handling with instrumentation enabled")
Reported-by: syzbot+d4b00edc2d0c910d4bf4@syzkaller.appspotmail.com
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
This is one of those cases where you scratch your head and ask yourself
how it ever worked in the first place. I've tested this stuff under KVM
as well as on a BeagleBone and the original Raspberry Pi, but apparently
none of the configs I tested had the frame pointer unwinder enabled.
That unwinder is what appears to result in vfp_support_entry() being
called from vfp_entry() via a proper stack frame rather than as a tail
call, and that blows up the first time you call it. Oh well.

 arch/arm/vfp/entry.S | 7 +++++--
 arch/arm/vfp/vfphw.S | 6 ++++--
 2 files changed, 9 insertions(+), 4 deletions(-)

Patch

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 7483ef8bccda394c..62206ef250371cd3 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -23,6 +23,9 @@ 
 @
 ENTRY(do_vfp)
 	mov	r1, r10
-	mov	r3, r9
-	b	vfp_entry
+	str	lr, [sp, #-8]!
+	add	r3, sp, #4
+	str	r9, [r3]
+	bl	vfp_entry
+	ldr	pc, [sp], #8
 ENDPROC(do_vfp)
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 4d8478264d82b3d2..a4610d0f321527cc 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -172,13 +172,14 @@  vfp_hw_state_valid:
 					@ out before setting an FPEXC that
 					@ stops us reading stuff
 	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
+	mov	sp, r3			@ we think we have handled things
+	pop	{lr}
 	sub	r2, r2, #4		@ Retry current instruction - if Thumb
 	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
 
-	mov	lr, r3			@ we think we have handled things
 local_bh_enable_and_ret:
 	adr	r0, .
 	mov	r1, #SOFTIRQ_DISABLE_OFFSET
@@ -209,8 +210,9 @@  skip:
 
 process_exception:
 	DBGSTR	"bounce"
+	mov	sp, r3			@ setup for a return to the user code.
+	pop	{lr}
 	mov	r2, sp			@ nothing stacked - regdump is at TOS
-	mov	lr, r3			@ setup for a return to the user code.
 
 	@ Now call the C code to package up the bounce to the support code
 	@   r0 holds the trigger instruction