[3/6] arm64: scs: Use 'scs_sp' register alias for x18

Message ID 20200515172756.27185-4-will@kernel.org
State Mainlined
Commit 711e8b0de0d63c70c825b473da01288b661a2386
Series Clean up Shadow Call Stack patches for 5.8

Commit Message

Will Deacon May 15, 2020, 5:27 p.m. UTC
x18 holds the SCS stack pointer value, so introduce a register alias to
make this easier to read in assembly code.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/scs.h |  6 ++++--
 arch/arm64/kernel/entry.S    | 10 +++++-----
 arch/arm64/kernel/head.S     |  2 +-
 3 files changed, 10 insertions(+), 8 deletions(-)
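
The alias itself is created with the GNU assembler's .req directive, which
makes the new name assemble exactly as the underlying register. A minimal
standalone sketch of how such an alias behaves (the push_ra macro below is
purely illustrative and not part of the patch):

	scs_sp	.req	x18			// 'scs_sp' is now another name for x18

	.macro	push_ra				// hypothetical helper macro
	str	x30, [scs_sp], #8		// assembles as: str x30, [x18], #8
	.endm

	.unreq	scs_sp				// drop the alias again

In the kernel sources the alias remains in effect for any assembly file that
includes asm/scs.h with CONFIG_SHADOW_CALL_STACK enabled.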

Comments

Mark Rutland May 18, 2020, 11:55 a.m. UTC | #1
On Fri, May 15, 2020 at 06:27:53PM +0100, Will Deacon wrote:
> x18 holds the SCS stack pointer value, so introduce a register alias to
> make this easier to read in assembly code.
> 
> Signed-off-by: Will Deacon <will@kernel.org>

I scanned through arm64 for all instances of x18, and it looks like
you've covered all the relevant uses here. In KVM we save/restore x18 a
bunch because it might be a platform register, but we do that
unconditionally and without knowledge of what it contains, so I think
that's fine to leave as-is. Therefore:

Reviewed-by: Mark Rutland <mark.rutland@arm.com>

As an aside, the comment in entry-ftrace.S is now stale where it says
that x18 is safe to clobber. I can send a patch to clean that up, unless
you want to do that yourself.

Mark.

> [...]
Will Deacon May 18, 2020, 1:03 p.m. UTC | #2
On Mon, May 18, 2020 at 12:55:47PM +0100, Mark Rutland wrote:
> On Fri, May 15, 2020 at 06:27:53PM +0100, Will Deacon wrote:
> > x18 holds the SCS stack pointer value, so introduce a register alias to
> > make this easier to read in assembly code.
> > 
> > Signed-off-by: Will Deacon <will@kernel.org>
> 
> I scanned through arm64 for all instances of x18, and it looks like
> you've covered all the relevant uses here. In KVM we save/restore x18 a
> bunch because it might be a platform register, but we do that
> unconditionally and without knowledge of what it contains, so I think
> that's fine to leave as-is. Therefore:
> 
> Reviewed-by: Mark Rutland <mark.rutland@arm.com>
> 
> As an aside, the comment in entry-ftrace.S is now stale where it says
> that x18 is safe to clobber. I can send a patch to clean that up, unless
> you want to do that yourself.

Thanks, I'll fix that up (see below). Also, apologies for typo'ing your
email address when I sent this out on Friday.

Will

--->8

From 7e86208cd6541c1229bc1fcd206596308d1727f8 Mon Sep 17 00:00:00 2001
From: Will Deacon <will@kernel.org>
Date: Mon, 18 May 2020 14:01:01 +0100
Subject: [PATCH] arm64: entry-ftrace.S: Update comment to indicate that x18 is
 live

The Shadow Call Stack pointer is held in x18, so update the ftrace
entry comment to indicate that it cannot be safely clobbered.

Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/entry-ftrace.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 833d48c9acb5..a338f40e64d3 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -23,8 +23,9 @@
  *
  * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
  *
- * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are
- * live, and x9-x18 are safe to clobber.
+ * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
+ * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
+ * clobber.
  *
  * We save the callsite's context into a pt_regs before invoking any ftrace
  * callbacks. So that we can get a sensible backtrace, we create a stack record
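
For context on why x18 is live here: with CONFIG_SHADOW_CALL_STACK the
compiler spills each return address onto a separate shadow stack addressed
by x18, so any instrumented function has state in that register at entry and
exit. Roughly, as a sketch of the instrumentation (following clang's
-fsanitize=shadow-call-stack code generation, not code from this patch):

	function:
		str	x30, [x18], #8		// prologue: push return address
						// ... function body ...
		ldr	x30, [x18, #-8]!	// epilogue: pop saved return address
		ret				// return via the reloaded x30
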
Mark Rutland May 18, 2020, 1:13 p.m. UTC | #3
On Mon, May 18, 2020 at 02:03:36PM +0100, Will Deacon wrote:
> [...]
> 
> Thanks, I'll fix that up (see below). Also, apologies for typo'ing your
> email address when I sent this out on Friday.

No worries; I'd already forgotten!

> 
> Will
> 
> --->8
> 
> From 7e86208cd6541c1229bc1fcd206596308d1727f8 Mon Sep 17 00:00:00 2001
> From: Will Deacon <will@kernel.org>
> Date: Mon, 18 May 2020 14:01:01 +0100
> Subject: [PATCH] arm64: entry-ftrace.S: Update comment to indicate that x18 is
>  live
> 
> The Shadow Call Stack pointer is held in x18, so update the ftrace
> entry comment to indicate that it cannot be safely clobbered.
> 
> Reported-by: Mark Rutland <mark.rutland@arm.com>
> Signed-off-by: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/kernel/entry-ftrace.S | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
> index 833d48c9acb5..a338f40e64d3 100644
> --- a/arch/arm64/kernel/entry-ftrace.S
> +++ b/arch/arm64/kernel/entry-ftrace.S
> @@ -23,8 +23,9 @@
>   *
>   * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
>   *
> - * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are
> - * live, and x9-x18 are safe to clobber.
> + * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
> + * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
> + * clobber.

I'd have called out x18 a bit more specifically:

| Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are
| live, x18 is potentially live with a shadow call stack pointer, and
| x9-x17 are safe to clobber.

... but either way this looks good; thanks!

Mark.

Patch

diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index 6b8cf4352fe3..d46efdd2060a 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -7,12 +7,14 @@ 
 #include <asm/asm-offsets.h>
 
 #ifdef CONFIG_SHADOW_CALL_STACK
+	scs_sp	.req	x18
+
 	.macro scs_load tsk, tmp
-	ldr	x18, [\tsk, #TSK_TI_SCS_SP]
+	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 
 	.macro scs_save tsk, tmp
-	str	x18, [\tsk, #TSK_TI_SCS_SP]
+	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
 	.macro scs_load tsk, tmp
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cb0516e6f963..741faf0706f1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -394,7 +394,7 @@  alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 #ifdef CONFIG_SHADOW_CALL_STACK
-	mov	x24, x18		// preserve the original shadow stack
+	mov	x24, scs_sp		// preserve the original shadow stack
 #endif
 
 	/*
@@ -416,7 +416,7 @@  alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* also switch to the irq shadow stack */
-	adr_this_cpu x18, irq_shadow_call_stack, x26
+	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
 #endif
 
 9998:
@@ -430,7 +430,7 @@  alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	.macro	irq_stack_exit
 	mov	sp, x19
 #ifdef CONFIG_SHADOW_CALL_STACK
-	mov	x18, x24
+	mov	scs_sp, x24
 #endif
 	.endm
 
@@ -1071,9 +1071,9 @@  SYM_CODE_START(__sdei_asm_handler)
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* Use a separate shadow call stack for normal and critical events */
 	cbnz	w4, 3f
-	adr_this_cpu dst=x18, sym=sdei_shadow_call_stack_normal, tmp=x6
+	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
 	b	4f
-3:	adr_this_cpu dst=x18, sym=sdei_shadow_call_stack_critical, tmp=x6
+3:	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
 4:
 #endif
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2b01c19c5483..1293baddfd20 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -426,7 +426,7 @@  SYM_FUNC_START_LOCAL(__primary_switched)
 	mov	x29, sp
 
 #ifdef CONFIG_SHADOW_CALL_STACK
-	adr_l	x18, init_shadow_call_stack	// Set shadow call stack
+	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
 #endif
 
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer