[v2,1/2] arm64: Always load shadow stack pointer directly from the task struct

Message ID 20230109174800.3286265-2-ardb@kernel.org (mailing list archive)
State New, archived
Series arm64: harden shadow call stack pointer handling

Commit Message

Ard Biesheuvel Jan. 9, 2023, 5:47 p.m. UTC
All occurrences of the scs_load macro load the value of the shadow call
stack pointer from the task that is current at that point. So instead
of taking a task struct register argument in the scs_load macro to
specify which task to load from, let's always reference the current
task directly. This should make it much harder to exploit any
instruction sequences that reload the shadow call stack pointer
register from memory.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/scs.h | 7 ++++---
 arch/arm64/kernel/entry.S    | 4 ++--
 arch/arm64/kernel/head.S     | 2 +-
 3 files changed, 7 insertions(+), 6 deletions(-)
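
For reference, get_current_task is the arm64 assembler.h helper that
reads the current task_struct pointer from the SP_EL0 system register.
Under that assumption, scs_load_current should expand to roughly the
following sketch:

	mrs	x18, sp_el0			// x18 (scs_sp) := current task_struct
	ldr	x18, [x18, #TSK_TI_SCS_SP]	// reload the shadow call stack pointer

Because SP_EL0 always holds the current task pointer while running in
the kernel, the load can no longer be redirected to an arbitrary
task_struct through a corrupted general-purpose register.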

Comments

Mark Rutland Jan. 10, 2023, 2:55 p.m. UTC | #1
On Mon, Jan 09, 2023 at 06:47:59PM +0100, Ard Biesheuvel wrote:
> All occurrences of the scs_load macro load the value of the shadow call
> stack pointer from the task that is current at that point. So instead
> of taking a task struct register argument in the scs_load macro to
> specify which task to load from, let's always reference the current
> task directly. This should make it much harder to exploit any
> instruction sequences that reload the shadow call stack pointer
> register from memory.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Makes sense to me.

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> ---
>  arch/arm64/include/asm/scs.h | 7 ++++---
>  arch/arm64/kernel/entry.S    | 4 ++--
>  arch/arm64/kernel/head.S     | 2 +-
>  3 files changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
> index ff7da1268a52ab79..13df982a080805e6 100644
> --- a/arch/arm64/include/asm/scs.h
> +++ b/arch/arm64/include/asm/scs.h
> @@ -10,15 +10,16 @@
>  #ifdef CONFIG_SHADOW_CALL_STACK
>  	scs_sp	.req	x18
>  
> -	.macro scs_load tsk
> -	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
> +	.macro scs_load_current
> +	get_current_task scs_sp
> +	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
>  	.endm
>  
>  	.macro scs_save tsk
>  	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
>  	.endm
>  #else
> -	.macro scs_load tsk
> +	.macro scs_load_current
>  	.endm
>  
>  	.macro scs_save tsk
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 11cb99c4d298784d..546f7773238ea45d 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
>  alternative_else_nop_endif
>  1:
>  
> -	scs_load tsk
> +	scs_load_current
>  	.else
>  	add	x21, sp, #PT_REGS_SIZE
>  	get_current_task tsk
> @@ -848,7 +848,7 @@ SYM_FUNC_START(cpu_switch_to)
>  	msr	sp_el0, x1
>  	ptrauth_keys_install_kernel x1, x8, x9, x10
>  	scs_save x0
> -	scs_load x1
> +	scs_load_current
>  	ret
>  SYM_FUNC_END(cpu_switch_to)
>  NOKPROBE(cpu_switch_to)
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 952e17bd1c0b4f91..b9c1a506798ea315 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping)
>  	stp	xzr, xzr, [sp, #S_STACKFRAME]
>  	add	x29, sp, #S_STACKFRAME
>  
> -	scs_load \tsk
> +	scs_load_current
>  
>  	adr_l	\tmp1, __per_cpu_offset
>  	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
> -- 
> 2.39.0
>
Kees Cook Jan. 12, 2023, 10:18 p.m. UTC | #2
On Mon, Jan 09, 2023 at 06:47:59PM +0100, Ard Biesheuvel wrote:
> All occurrences of the scs_load macro load the value of the shadow call
> stack pointer from the task that is current at that point. So instead
> of taking a task struct register argument in the scs_load macro to
> specify which task to load from, let's always reference the current
> task directly. This should make it much harder to exploit any
> instruction sequences that reload the shadow call stack pointer
> register from memory.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Reviewed-by: Kees Cook <keescook@chromium.org>
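
A subtlety in the cpu_switch_to hunk: by the time scs_load_current
runs, SP_EL0 has already been updated to point at the next task (via
the msr sp_el0, x1 a few instructions earlier), so loading from
"current" picks up the incoming task's shadow call stack, exactly as
the old scs_load x1 did. A sketch of the resulting sequence, assuming
the hunk applies as shown:

	msr	sp_el0, x1			// current := next task (x1)
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0				// save outgoing task's shadow SP
	scs_load_current			// load next task's shadow SP via SP_EL0
	ret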

Patch

diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index ff7da1268a52ab79..13df982a080805e6 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -10,15 +10,16 @@ 
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18
 
-	.macro scs_load tsk
-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+	.macro scs_load_current
+	get_current_task scs_sp
+	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
 	.endm
 
 	.macro scs_save tsk
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk
+	.macro scs_load_current
 	.endm
 
 	.macro scs_save tsk
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 11cb99c4d298784d..546f7773238ea45d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -275,7 +275,7 @@  alternative_if ARM64_HAS_ADDRESS_AUTH
 alternative_else_nop_endif
 1:
 
-	scs_load tsk
+	scs_load_current
 	.else
 	add	x21, sp, #PT_REGS_SIZE
 	get_current_task tsk
@@ -848,7 +848,7 @@  SYM_FUNC_START(cpu_switch_to)
 	msr	sp_el0, x1
 	ptrauth_keys_install_kernel x1, x8, x9, x10
 	scs_save x0
-	scs_load x1
+	scs_load_current
 	ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 952e17bd1c0b4f91..b9c1a506798ea315 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -404,7 +404,7 @@  SYM_FUNC_END(create_kernel_mapping)
 	stp	xzr, xzr, [sp, #S_STACKFRAME]
 	add	x29, sp, #S_STACKFRAME
 
-	scs_load \tsk
+	scs_load_current
 
 	adr_l	\tmp1, __per_cpu_offset
 	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
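
The head.S hunk follows the same pattern: the surrounding init_cpu_task
macro programs SP_EL0 from its \tsk argument before this point, so
scs_load_current observes the task being initialized. A sketch of the
macro's shape, assuming the in-tree definition:

	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk			// current := \tsk
	...					// stack and initial frame setup elided
	scs_load_current			// reads SP_EL0, i.e. the task above
	...					// per-cpu offset setup elided
	.endm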