@@ -86,6 +86,10 @@
#define IMM12_MASK 0xfff
+/* the frame pointer used for stack unwinding */
+ARM( fpreg .req r11 )
+THUMB( fpreg .req r7 )
+
/*
* Enable and disable interrupts
*/
@@ -32,9 +32,51 @@
/*
* Interrupt handling.
*/
- .macro irq_handler
+ .macro irq_handler, from_user:req
mov r0, sp
+#ifdef CONFIG_UNWINDER_ARM
+ mov fpreg, sp @ Preserve original SP
+#else
+ mov r7, fp @ Preserve original FP
+ mov r8, sp @ Preserve original SP
+#endif
+ ldr_this_cpu sp, irq_stack_ptr, r2, r3
+ .if \from_user == 0
+UNWIND( .setfp fpreg, sp )
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r2, sp, r0 @ SP above bottom of IRQ stack?
+ rsbscs r2, r2, #THREAD_SIZE @ ... and below the top?
+ movcs sp, r0 @ If so, revert to incoming SP
+
+#ifndef CONFIG_UNWINDER_ARM
+ @
+ @ Inform the frame pointer unwinder where the next frame lives
+ @
+ movcc lr, pc @ Make LR point into .entry.text so
+ @ that we will get a dump of the
+ @ exception stack for this frame.
+#ifdef CONFIG_CC_IS_GCC
+ movcc ip, r0 @ Store the old SP in the frame record.
+ stmdbcc sp!, {fp, ip, lr, pc} @ Push frame record
+ addcc fp, sp, #12
+#else
+ stmdbcc sp!, {fp, lr} @ Push frame record
+ movcc fp, sp
+#endif // CONFIG_CC_IS_GCC
+#endif // CONFIG_UNWINDER_ARM
+ .endif
+
bl generic_handle_arch_irq
+
+#ifdef CONFIG_UNWINDER_ARM
+ mov sp, fpreg @ Restore original SP
+#else
+ mov fp, r7 @ Restore original FP
+ mov sp, r8 @ Restore original SP
+#endif // CONFIG_UNWINDER_ARM
.endm

.macro pabt_helper
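A rough C view of the frame records the stmdbcc sequences above push (illustrative only: the struct names are invented here, the layouts follow the asm). stmdb stores lower-numbered registers at lower addresses, so the first field sits at the final SP:

struct gcc_frame_record {		/* CONFIG_CC_IS_GCC: APCS-style */
	unsigned long fp;		/* caller's frame pointer */
	unsigned long ip;		/* old SP, stashed by "movcc ip, r0" */
	unsigned long lr;		/* points into .entry.text */
	unsigned long pc;		/* "addcc fp, sp, #12" points fp here */
};

struct clang_frame_record {		/* Clang emits two-word records */
	unsigned long fp;		/* caller's frame pointer */
	unsigned long lr;		/* points into .entry.text */
};

Only the GCC layout carries the old SP (in the ip slot), which is why the dump_backtrace_entry() hunk further down guards its [-2] load with CONFIG_CC_IS_GCC.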
@@ -191,7 +233,7 @@ ENDPROC(__dabt_svc)
.align 5
__irq_svc:
svc_entry
- irq_handler
+ irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -418,7 +460,7 @@ ENDPROC(__dabt_usr)
__irq_usr:
usr_entry
kuser_cmpxchg_check
- irq_handler
+ irq_handler from_user=1
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
@@ -40,11 +40,24 @@ __irq_entry:
@ Invoke the IRQ handler
@
mov r0, sp
- stmdb sp!, {lr}
+ ldr_this_cpu sp, irq_stack_ptr, r1, r2
+
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r2, sp, r0 @ SP above bottom of IRQ stack?
+ rsbscs r2, r2, #THREAD_SIZE @ ... and below the top?
+ movcs sp, r0
+
+ push {r0, lr} @ preserve LR and original SP
+
@ routine called with r0 = struct pt_regs *
bl generic_handle_arch_irq
- pop {lr}
+
+ pop {r0, lr}
+ mov sp, r0

@
@ Check for any pending work if returning to user
@
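The subs/rsbscs/movcs triple used in both entry paths is a single unsigned range check. A C sketch under invented names (irq_sp_top is what ldr_this_cpu just loaded, i.e. the exclusive top of this CPU's IRQ stack):

static inline unsigned long choose_sp(unsigned long irq_sp_top,
				      unsigned long orig_sp)
{
	/* subs: carry survives only if irq_sp_top >= orig_sp ...	*/
	/* rsbscs: ... and the distance is at most THREAD_SIZE		*/
	if (irq_sp_top >= orig_sp && irq_sp_top - orig_sp <= THREAD_SIZE)
		return orig_sp;	/* already on the IRQ stack: movcs sp, r0 */

	return irq_sp_top;	/* otherwise switch to the IRQ stack */
}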
@@ -43,6 +43,21 @@
unsigned long irq_err_count;

+asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr);
+
+static void __init init_irq_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stack = (u8 *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (WARN_ON(!stack))
+ break;
+ per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
+ }
+}
+
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
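Note that the value stored per CPU is &stack[THREAD_SIZE], the exclusive top of the allocation, because ARM stacks grow toward lower addresses; the ldr_this_cpu macro in the entry code loads that value straight into SP. A hypothetical C-side reader of the same variable (not part of the patch) would be:

static u8 *irq_stack_top(int cpu)
{
	/* set to &stack[THREAD_SIZE] in init_irq_stacks() above */
	return per_cpu(irq_stack_ptr, cpu);
}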
@@ -84,6 +99,8 @@ void __init init_IRQ(void)
{
int ret;

+ init_irq_stacks();
+
if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else
@@ -66,6 +66,19 @@ void dump_backtrace_entry(unsigned long where, unsigned long from,
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);

+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
+ IS_ENABLED(CONFIG_CC_IS_GCC) &&
+ end > ALIGN(frame, THREAD_SIZE)) {
+ /*
+ * If we are walking past the end of the stack, it may be because
+ * we are on an IRQ or overflow stack. In this
+ * case, we can load the address of the other stack from the
+ * frame record.
+ */
+ frame = ((unsigned long *)frame)[-2] - 4;
+ end = frame + 4 + sizeof(struct pt_regs);
+ }
+
#ifndef CONFIG_KALLSYMS
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
loglvl, where, from);
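The [-2] index is tied to the GCC frame record pushed in irq_handler above: fp points at the saved pc, so the saved ip, which "movcc ip, r0" loaded with the pre-switch SP, sits two words below it. A hypothetical helper (names invented) spelling out the load:

static unsigned long old_sp_from_frame(unsigned long frame)
{
	unsigned long *rec = (unsigned long *)frame;

	/* rec[0]: saved pc, rec[-1]: saved lr (in .entry.text),
	 * rec[-2]: saved ip == SP before the stack switch */
	return rec[-2];
}

Subtracting 4 afterwards makes the recomputed frame + 4 land exactly on the old SP, so the interrupted context's pt_regs are dumped from the right address.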
@@ -280,7 +293,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ ALIGN(regs->ARM_sp, THREAD_SIZE));
dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
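Since every kernel stack (task, IRQ, overflow) is THREAD_SIZE aligned, rounding SP up to the next THREAD_SIZE boundary yields the top of whichever stack SP is currently on; the replaced expression was only correct for the task stack. A worked example with invented numbers, assuming 8 KiB stacks:

static unsigned long stack_top(unsigned long sp)
{
	/* e.g. sp == 0xc1231f60, THREAD_SIZE == 0x2000:
	 * ALIGN() rounds up to 0xc1232000, that stack's top */
	return ALIGN(sp, THREAD_SIZE);
}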
@@ -197,6 +197,13 @@ finished_setup:
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
+
+ @
+ @ Kernel stacks may be discontiguous in memory. If the next
+ @ frame is below the previous frame, accept it as long as it
+ @ lives in kernel memory.
+ @
+ cmpls sv_fp, #PAGE_OFFSET
bhi for_each_frame
1006: adr r0, .Lbad
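In C terms (a sketch only, names invented), the relaxed termination test now reads: keep unwinding if the next frame is strictly above the current one, or if it is lower but still a kernel address, since a transition from the IRQ stack back to the task stack may legitimately move downward in the address space:

static bool frame_plausible(unsigned long next_fp, unsigned long frame)
{
	/* cmp/bhi: strictly above the current frame, or	*/
	/* cmpls/bhi: below it but above PAGE_OFFSET		*/
	return next_fp > frame || next_fp > PAGE_OFFSET;
}

The next hunk applies the same relaxation to the other unwinder flavour.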
@@ -98,6 +98,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
+
+ @
+ @ Kernel stacks may be discontiguous in memory. If the next
+ @ frame is below the previous frame, accept it as long as it
+ @ lives in kernel memory.
+ @
+ cmpls sv_fp, #PAGE_OFFSET
bhi for_each_frame
1006: adr r0, .Lbad