diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/hwcap.h>
+#include <asm/usercfi.h>
#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -57,6 +57,9 @@ struct thread_info {
int cpu;
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
unsigned long envcfg;
+#ifdef CONFIG_RISCV_USER_CFI
+ struct cfi_status user_cfi_state;
+#endif
#ifdef CONFIG_SHADOW_CALL_STACK
void *scs_base;
void *scs_sp;
diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h
new file mode 100644
--- /dev/null
+++ b/arch/riscv/include/asm/usercfi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2024 Rivos, Inc.
+ * Deepak Gupta <debug@rivosinc.com>
+ */
+#ifndef _ASM_RISCV_USERCFI_H
+#define _ASM_RISCV_USERCFI_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+#ifdef CONFIG_RISCV_USER_CFI
+struct cfi_status {
+ unsigned long ubcfi_en : 1; /* Enable for backward cfi. */
+ unsigned long rsvd : ((sizeof(unsigned long)*8) - 1);
+ unsigned long user_shdw_stk; /* Current user shadow stack pointer */
+ unsigned long shdw_stk_base; /* Base address of shadow stack */
+ unsigned long shdw_stk_size; /* size of shadow stack */
+};
+
+#endif /* CONFIG_RISCV_USER_CFI */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_USERCFI_H */
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -44,6 +44,10 @@ void asm_offsets(void)
#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+#ifdef CONFIG_RISCV_USER_CFI
+ OFFSET(TASK_TI_CFI_STATUS, task_struct, thread_info.user_cfi_state);
+ OFFSET(TASK_TI_USER_SSP, task_struct, thread_info.user_cfi_state.user_shdw_stk);
+#endif
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -60,6 +60,20 @@ SYM_CODE_START(handle_exception)
REG_L s0, TASK_TI_USER_SP(tp)
csrrc s1, CSR_STATUS, t0
+ /*
+ * If the previous mode was U, capture the shadow stack pointer and save it away.
+ * Zero CSR_SSP at the same time for sanitization.
+ */
+ ALTERNATIVE("nop; nop; nop; nop",
+ __stringify( \
+ andi s2, s1, SR_SPP; \
+ bnez s2, skip_ssp_save; \
+ csrrw s2, CSR_SSP, x0; \
+ REG_S s2, TASK_TI_USER_SSP(tp); \
+ skip_ssp_save:),
+ 0,
+ RISCV_ISA_EXT_ZICFISS,
+ CONFIG_RISCV_USER_CFI)
csrr s2, CSR_EPC
csrr s3, CSR_TVAL
csrr s4, CSR_CAUSE
@@ -141,6 +155,18 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
* structures again.
*/
csrw CSR_SCRATCH, tp
+
+ /*
+ * Going back to U mode, restore the shadow stack pointer.
+ */
+ ALTERNATIVE("nop; nop",
+ __stringify( \
+ REG_L s3, TASK_TI_USER_SSP(tp); \
+ csrw CSR_SSP, s3),
+ 0,
+ RISCV_ISA_EXT_ZICFISS,
+ CONFIG_RISCV_USER_CFI)
+
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
move a0, sp
Carves out space in the arch-specific thread struct for cfi status and
shadow stack in user mode on riscv.

This patch does the following:
- defines a new structure cfi_status with a status bit for the cfi feature
- defines shadow stack pointer, base and size in the cfi_status structure
- defines offsets for the new thread_info member fields in asm-offsets.c
- saves and restores the shadow stack pointer on trap entry (U --> S) and
  exit (S --> U)

Shadow stack save/restore is gated on feature availability and implemented
using alternatives. The CSR could be context switched in `switch_to` as
well, but as soon as kernel shadow stack support gets rolled in, the shadow
stack pointer will need to be switched at the trap entry/exit point (much
like `sp`). It can be argued that the kernel shadow stack deployment
scenario may not be as prevalent as user mode using this feature. But even
a minimal deployment of kernel shadow stacks means the feature must be
supported, and thus the shadow stack pointer is saved/restored in entry.S
instead of in `switch_to.h`.

Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
 arch/riscv/include/asm/processor.h   |  1 +
 arch/riscv/include/asm/thread_info.h |  3 +++
 arch/riscv/include/asm/usercfi.h     | 24 ++++++++++++++++++++++++
 arch/riscv/kernel/asm-offsets.c      |  4 ++++
 arch/riscv/kernel/entry.S            | 26 ++++++++++++++++++++++++++
 5 files changed, 58 insertions(+)
 create mode 100644 arch/riscv/include/asm/usercfi.h
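
As a reviewer aid (not part of the patch), below is a minimal,
userspace-compilable C sketch of the bookkeeping described above. The
cfi_status layout mirrors the struct added in usercfi.h; the helper names
(shstk_enable, shstk_save_ssp, shstk_restore_ssp) and the fake_csr_ssp
variable are hypothetical stand-ins for what the entry.S alternatives do
with CSR_SSP on trap entry/exit.

/*
 * Illustrative sketch only: mirrors the cfi_status layout from usercfi.h
 * and models the CSR_SSP save/restore done in entry.S. Helper names and
 * fake_csr_ssp are hypothetical and do not exist in the kernel.
 */
#include <assert.h>
#include <stdio.h>

struct cfi_status {
	unsigned long ubcfi_en : 1;	/* Enable for backward cfi. */
	unsigned long rsvd : ((sizeof(unsigned long) * 8) - 1);
	unsigned long user_shdw_stk;	/* Current user shadow stack pointer */
	unsigned long shdw_stk_base;	/* Base address of shadow stack */
	unsigned long shdw_stk_size;	/* Size of shadow stack */
};

/* Stand-in for the CSR_SSP register so this sketch runs in userspace. */
static unsigned long fake_csr_ssp;

/* Record base/size and point the shadow stack pointer at the top. */
static void shstk_enable(struct cfi_status *s, unsigned long base,
			 unsigned long size)
{
	s->shdw_stk_base = base;
	s->shdw_stk_size = size;
	s->user_shdw_stk = base + size;	/* shadow stack grows downward */
	s->ubcfi_en = 1;
}

/* Trap entry (U --> S): capture SSP and sanitize it, like csrrw ... x0. */
static void shstk_save_ssp(struct cfi_status *s)
{
	s->user_shdw_stk = fake_csr_ssp;
	fake_csr_ssp = 0;
}

/* Trap exit (S --> U): restore the saved SSP, like csrw CSR_SSP. */
static void shstk_restore_ssp(const struct cfi_status *s)
{
	fake_csr_ssp = s->user_shdw_stk;
}

int main(void)
{
	struct cfi_status ts = { 0 };

	shstk_enable(&ts, 0x40000000UL, 0x1000UL);
	fake_csr_ssp = ts.user_shdw_stk;	/* hardware SSP while in U mode */

	fake_csr_ssp -= sizeof(unsigned long);	/* user code pushed a return address */
	shstk_save_ssp(&ts);			/* trap into the kernel */
	assert(fake_csr_ssp == 0);		/* no stale user SSP while in S mode */

	shstk_restore_ssp(&ts);			/* sret back to U mode */
	printf("SSP restored to %#lx\n", fake_csr_ssp);
	return 0;
}

The zeroing on entry mirrors the csrrw s2, CSR_SSP, x0 in handle_exception:
it keeps a stale user shadow stack pointer from staying live while the
kernel runs, which will matter once kernel-mode shadow stacks are added.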