
[5.10.y,v2] Revert "arm64: Stash shadow stack pointer in the task struct on interrupt"

Message ID 20240219132153.378265-1-xiangyang3@huawei.com (mailing list archive)
State New, archived

Commit Message

Xiang Yang Feb. 19, 2024, 1:21 p.m. UTC
This reverts commit 3f225f29c69c13ce1cbdb1d607a42efeef080056.

The shadow call stack pointer for the IRQ is now stashed in the current
task's thread info by irq_stack_entry. If soft IRQs are pending at the
end of a hard IRQ and are processed with interrupts enabled,
irq_stack_entry can run again for a nested IRQ and overwrite the shadow
call stack pointer already stored in the current task's thread info.
The first (outer) hard IRQ entry then restores the wrong shadow call
stack pointer on exit, and the system ends up panicking.

task A                               |  task A
-------------------------------------+------------------------------------
el1_irq        //irq1 enter          |
  irq_handler  //save scs_sp1        |
    gic_handle_irq                   |
    irq_exit                         |
      __do_softirq                   |
                                     | el1_irq         //irq2 enter
                                     |   irq_handler   //save scs_sp2
                                     |                 //overwrite scs_sp1
                                     |   ...
                                     |   irq_stack_exit //restore scs_sp2
  irq_stack_exit //restore wrong     |
                 //scs_sp2           |

So revert this commit to fix it.

Fixes: 3f225f29c69c ("arm64: Stash shadow stack pointer in the task struct on interrupt")

Signed-off-by: Xiang Yang <xiangyang3@huawei.com>
---
 arch/arm64/kernel/entry.S | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
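
For illustration, a minimal user-space C sketch of the race described
above (not kernel code; the names task_scs_slot, irq_stack_entry_buggy
and the constants are invented for the example). It models saving the
interrupted shadow call stack pointer in a single per-task slot, which
loses the outer value as soon as the entry path runs a second time for
a nested IRQ:

#include <stdio.h>

/* Current shadow call stack pointer (register-like global for the model). */
static unsigned long scs_sp = 0x1000;        /* task's shadow call stack */
static unsigned long task_scs_slot;          /* single per-task save slot */

#define IRQ_SCS 0x2000                       /* IRQ shadow call stack */

/* Behaviour with the reverted commit: save into the per-task slot. */
static void irq_stack_entry_buggy(void)
{
	task_scs_slot = scs_sp;              /* models scs_save tsk */
	scs_sp = IRQ_SCS;                    /* switch to the IRQ shadow stack */
}

static void irq_stack_exit_buggy(void)
{
	scs_sp = task_scs_slot;              /* models scs_load_current */
}

static void softirq_with_nested_irq(void)
{
	/* irq2: the entry macro runs again and clobbers the saved value. */
	irq_stack_entry_buggy();
	irq_stack_exit_buggy();
}

int main(void)
{
	unsigned long expected = scs_sp;

	irq_stack_entry_buggy();             /* irq1: slot = 0x1000             */
	softirq_with_nested_irq();           /* irq2: slot = 0x2000 (clobbered) */
	irq_stack_exit_buggy();              /* restores 0x2000, not 0x1000     */

	printf("restored %#lx, expected %#lx\n", scs_sp, expected);
	return 0;
}

After the revert, the saved value lives in the callee-saved register
x24 instead; since the nested exception entry stacks and restores the
general purpose registers, each entry effectively gets its own save
slot and the outer value survives the nesting shown above.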

Comments

Ard Biesheuvel Feb. 19, 2024, 4:57 p.m. UTC | #1
On Mon, 19 Feb 2024 at 14:24, Xiang Yang <xiangyang3@huawei.com> wrote:
>
> This reverts commit 3f225f29c69c13ce1cbdb1d607a42efeef080056.
>
> The shadow call stack pointer for the IRQ is now stashed in the current
> task's thread info by irq_stack_entry. If soft IRQs are pending at the
> end of a hard IRQ and are processed with interrupts enabled,
> irq_stack_entry can run again for a nested IRQ and overwrite the shadow
> call stack pointer already stored in the current task's thread info.
> The first (outer) hard IRQ entry then restores the wrong shadow call
> stack pointer on exit, and the system ends up panicking.
>
> task A                               |  task A
> -------------------------------------+------------------------------------
> el1_irq        //irq1 enter          |
>   irq_handler  //save scs_sp1        |
>     gic_handle_irq                   |
>     irq_exit                         |
>       __do_softirq                   |
>                                      | el1_irq         //irq2 enter
>                                      |   irq_handler   //save scs_sp2
>                                      |                 //overwrite scs_sp1
>                                      |   ...
>                                      |   irq_stack_exit //restore scs_sp2
>   irq_stack_exit //restore wrong     |
>                  //scs_sp2           |
>
> So revert this commit to fix it.
>
> Fixes: 3f225f29c69c ("arm64: Stash shadow stack pointer in the task struct on interrupt")
>
> Signed-off-by: Xiang Yang <xiangyang3@huawei.com>

Acked-by: Ard Biesheuvel <ardb@kernel.org>



> ---
>  arch/arm64/kernel/entry.S | 8 ++++++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index a94acea770c7..020a455824be 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -431,7 +431,9 @@ SYM_CODE_END(__swpan_exit_el0)
>
>         .macro  irq_stack_entry
>         mov     x19, sp                 // preserve the original sp
> -       scs_save tsk                    // preserve the original shadow stack
> +#ifdef CONFIG_SHADOW_CALL_STACK
> +       mov     x24, scs_sp             // preserve the original shadow stack
> +#endif
>
>         /*
>          * Compare sp with the base of the task stack.
> @@ -465,7 +467,9 @@ SYM_CODE_END(__swpan_exit_el0)
>          */
>         .macro  irq_stack_exit
>         mov     sp, x19
> -       scs_load_current
> +#ifdef CONFIG_SHADOW_CALL_STACK
> +       mov     scs_sp, x24
> +#endif
>         .endm
>
>  /* GPRs used by entry code */
> --
> 2.34.1
>
Greg KH Feb. 20, 2024, 3:31 p.m. UTC | #2
On Mon, Feb 19, 2024 at 05:57:07PM +0100, Ard Biesheuvel wrote:
> On Mon, 19 Feb 2024 at 14:24, Xiang Yang <xiangyang3@huawei.com> wrote:
> >
> > This reverts commit 3f225f29c69c13ce1cbdb1d607a42efeef080056.
> >
> > The shadow call stack pointer for the IRQ is now stashed in the current
> > task's thread info by irq_stack_entry. If soft IRQs are pending at the
> > end of a hard IRQ and are processed with interrupts enabled,
> > irq_stack_entry can run again for a nested IRQ and overwrite the shadow
> > call stack pointer already stored in the current task's thread info.
> > The first (outer) hard IRQ entry then restores the wrong shadow call
> > stack pointer on exit, and the system ends up panicking.
> >
> > task A                               |  task A
> > -------------------------------------+------------------------------------
> > el1_irq        //irq1 enter          |
> >   irq_handler  //save scs_sp1        |
> >     gic_handle_irq                   |
> >     irq_exit                         |
> >       __do_softirq                   |
> >                                      | el1_irq         //irq2 enter
> >                                      |   irq_handler   //save scs_sp2
> >                                      |                 //overwrite scs_sp1
> >                                      |   ...
> >                                      |   irq_stack_exit //restore scs_sp2
> >   irq_stack_exit //restore wrong     |
> >                  //scs_sp2           |
> >
> > So revert this commit to fix it.
> >
> > Fixes: 3f225f29c69c ("arm64: Stash shadow stack pointer in the task struct on interrupt")
> >
> > Signed-off-by: Xiang Yang <xiangyang3@huawei.com>
> 
> Acked-by: Ard Biesheuvel <ardb@kernel.org>

Now queued up, thanks.

greg k-h

Patch

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a94acea770c7..020a455824be 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -431,7 +431,9 @@  SYM_CODE_END(__swpan_exit_el0)
 
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
-	scs_save tsk			// preserve the original shadow stack
+#ifdef CONFIG_SHADOW_CALL_STACK
+	mov	x24, scs_sp		// preserve the original shadow stack
+#endif
 
 	/*
 	 * Compare sp with the base of the task stack.
@@ -465,7 +467,9 @@  SYM_CODE_END(__swpan_exit_el0)
 	 */
 	.macro	irq_stack_exit
 	mov	sp, x19
-	scs_load_current
+#ifdef CONFIG_SHADOW_CALL_STACK
+	mov	scs_sp, x24
+#endif
 	.endm
 
 /* GPRs used by entry code */