@@ -8,6 +8,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/prctl.h>
+#include <linux/errno.h>
struct task_struct;
struct kernel_clone_args;
@@ -35,6 +36,9 @@ bool is_shstk_locked(struct task_struct *task);
bool is_shstk_allocated(struct task_struct *task);
void set_shstk_lock(struct task_struct *task);
void set_shstk_status(struct task_struct *task, bool enable);
+unsigned long get_active_shstk(struct task_struct *task);
+int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr);
+int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr);
bool is_indir_lp_enabled(struct task_struct *task);
bool is_indir_lp_locked(struct task_struct *task);
void set_indir_lp_status(struct task_struct *task, bool enable);
@@ -72,6 +76,12 @@ void set_indir_lp_lock(struct task_struct *task);
#define set_indir_lp_lock(task)
+/*
+ * !CONFIG_RISCV_USER_CFI stubs: fail shadow-stack save/restore and report
+ * no active shadow stack. Expansions are parenthesized per kernel macro
+ * convention so they are safe inside any expression.
+ */
+#define restore_user_shstk(tsk, shstk_ptr) (-EINVAL)
+
+#define save_user_shstk(tsk, saved_shstk_ptr) (-EINVAL)
+
+#define get_active_shstk(task) (0UL)
+
#endif /* CONFIG_RISCV_USER_CFI */
#endif /* __ASSEMBLY__ */
@@ -127,6 +127,10 @@ struct __riscv_v_regset_state {
*/
#define RISCV_MAX_VLENB (8192)
+/* Zicfiss (shadow stack) signal context extension payload */
+struct __sc_riscv_cfi_state {
+	unsigned long ss_ptr; /* shadow stack pointer */
+};
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_RISCV_PTRACE_H */
@@ -10,6 +10,7 @@
/* The Magic number for signal context frame header. */
#define RISCV_V_MAGIC 0x53465457
+#define RISCV_ZICFISS_MAGIC 0x9487
#define END_MAGIC 0x0
/* The size of END signal context header. */
@@ -22,11 +22,13 @@
#include <asm/vector.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
+#include <asm/usercfi.h>
unsigned long signal_minsigstksz __ro_after_init;
extern u32 __user_rt_sigreturn[2];
static size_t riscv_v_sc_size __ro_after_init;
+static size_t riscv_zicfiss_sc_size __ro_after_init;
#define DEBUG_SIG 0
@@ -139,6 +141,62 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
}
+/*
+ * Save shadow stack state into the signal frame.
+ *
+ * Pushes a restore token onto the task's shadow stack (via
+ * save_user_shstk()) and records the token's location in @sc_cfi so that
+ * sigreturn can restore the shadow stack pointer safely.
+ *
+ * Returns the size of the saved context on success, 0 when shadow stack
+ * is compiled out or not enabled for current, or -EFAULT on failure.
+ */
+static long save_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
+{
+	struct __sc_riscv_cfi_state __user *state = sc_cfi;
+	unsigned long ss_ptr = 0;
+	long err = 0;
+
+	if (!IS_ENABLED(CONFIG_RISCV_USER_CFI) || !is_shstk_enabled(current))
+		return 0;
+
+	/*
+	 * Save a pointer to the shadow stack itself on the shadow stack as a
+	 * form of token. A token on the shadow stack gives the following
+	 * properties:
+	 * - Safe save and restore for shadow stack switching. Any save of shadow stack
+	 *   must have had saved a token on shadow stack. Similarly any restore of shadow
+	 *   stack must check the token before restore. Since writing to shadow stack with
+	 *   the address of the shadow stack itself is not easily allowed, a restore
+	 *   without a save is quite difficult for an attacker to perform.
+	 * - A natural break. A token in shadow stack provides a natural break in shadow stack
+	 *   so a single linear range can be bucketed into different shadow stack segments. Any
+	 *   sspopchk will detect the condition and fault to kernel as sw check exception.
+	 */
+	err |= save_user_shstk(current, &ss_ptr);
+	err |= __put_user(ss_ptr, &state->ss_ptr);
+	if (unlikely(err))
+		return -EFAULT;
+
+	return riscv_zicfiss_sc_size;
+}
+
+/*
+ * Restore shadow stack state saved in the signal frame.
+ *
+ * Reads the saved shadow stack pointer out of @sc_cfi and hands it to
+ * restore_user_shstk(), which validates and consumes the restore token
+ * before switching the active shadow stack.
+ *
+ * Returns 0 on success, -EFAULT on a faulting user access, -EINVAL for
+ * a bad token.
+ */
+static long __restore_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
+{
+	struct __sc_riscv_cfi_state __user *state = sc_cfi;
+	unsigned long ss_ptr = 0;
+	long err;
+
+	/*
+	 * Restore shadow stack from a token stored on the shadow stack itself,
+	 * as a safe way to restore. A token on the shadow stack gives the
+	 * following properties:
+	 * - Safe save and restore for shadow stack switching. Any save of shadow stack
+	 *   must have had saved a token on shadow stack. Similarly any restore of shadow
+	 *   stack must check the token before restore. Since writing to shadow stack with
+	 *   the address of the shadow stack itself is not easily allowed, a restore
+	 *   without a save is quite difficult for an attacker to perform.
+	 * - A natural break. A token in shadow stack provides a natural break in shadow stack
+	 *   so a single linear range can be bucketed into different shadow stack segments.
+	 *   sspopchk will detect the condition and fault to kernel as sw check exception.
+	 */
+	err = __copy_from_user(&ss_ptr, &state->ss_ptr, sizeof(unsigned long));
+
+	/*
+	 * __copy_from_user() returns the number of bytes left uncopied;
+	 * convert that to a proper error code rather than leaking a positive
+	 * byte count to the caller (matches save_cfiss_state()).
+	 */
+	if (unlikely(err))
+		return -EFAULT;
+
+	return restore_user_shstk(current, ss_ptr);
+}
+
struct arch_ext_priv {
__u32 magic;
long (*save)(struct pt_regs *regs, void __user *sc_vec);
@@ -149,6 +207,10 @@ struct arch_ext_priv arch_ext_list[] = {
.magic = RISCV_V_MAGIC,
.save = &save_v_state,
},
+ {
+ .magic = RISCV_ZICFISS_MAGIC,
+ .save = &save_cfiss_state,
+ },
};
const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
@@ -200,6 +262,12 @@ static long restore_sigcontext(struct pt_regs *regs,
err = __restore_v_state(regs, sc_ext_ptr);
break;
+ case RISCV_ZICFISS_MAGIC:
+ if (!is_shstk_enabled(current) || size != riscv_zicfiss_sc_size)
+ return -EINVAL;
+
+ err = __restore_cfiss_state(regs, sc_ext_ptr);
+ break;
default:
return -EINVAL;
}
@@ -220,6 +288,10 @@ static size_t get_rt_frame_size(bool cal_all)
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
+
+ if (is_shstk_enabled(current))
+ total_context_size += riscv_zicfiss_sc_size;
+
/*
* Preserved a __riscv_ctx_hdr for END signal context header if an
* extension uses __riscv_extra_ext_header
@@ -363,6 +435,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
#ifdef CONFIG_MMU
regs->ra = (unsigned long)VDSO_SYMBOL(
current->mm->context.vdso, rt_sigreturn);
+
+	/* NOTE(review): with bcfi enabled, x1 (ra) and x5 (t0) are expected to match at the sigreturn trampoline; confirm whether this fixup is actually required */
+ if (is_shstk_enabled(current))
+ regs->t0 = regs->ra;
+
#else
/*
* For the nommu case we don't have a VDSO. Instead we push two
@@ -491,6 +568,9 @@ void __init init_rt_signal_env(void)
{
riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) +
sizeof(struct __sc_riscv_v_state) + riscv_v_vsize;
+
+ riscv_zicfiss_sc_size = sizeof(struct __riscv_ctx_hdr) +
+ sizeof(struct __sc_riscv_cfi_state);
/*
* Determine the stack space required for guaranteed signal delivery.
* The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry
@@ -52,6 +52,11 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
}
+/* Return the task's currently active user shadow stack pointer. */
+unsigned long get_active_shstk(struct task_struct *task)
+{
+	return task->thread_info.user_cfi_state.user_shdw_stk;
+}
+
void set_shstk_status(struct task_struct *task, bool enable)
{
task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0;
@@ -164,6 +169,58 @@ static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
return 0;
}
+/*
+ * Save the user shadow stack pointer on the shadow stack itself and return
+ * a pointer to the saved location (the restore token) via @saved_shstk_ptr.
+ * The token location also becomes the task's new active shadow stack
+ * pointer.
+ *
+ * Returns 0 on success, -EINVAL for a NULL @saved_shstk_ptr, or a negative
+ * error (e.g. -EFAULT) propagated from create_rstor_token().
+ */
+int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
+{
+	unsigned long ss_ptr = 0;
+	unsigned long token_loc = 0;
+	int ret = 0;
+
+	if (saved_shstk_ptr == NULL)
+		return -EINVAL;
+
+	ss_ptr = get_active_shstk(tsk);
+	ret = create_rstor_token(ss_ptr, &token_loc);
+
+	if (!ret) {
+		/* publish the token location and make it the active shstk pointer */
+		*saved_shstk_ptr = token_loc;
+		set_active_shstk(tsk, token_loc);
+	}
+
+	return ret;
+}
+
<doc_update>
+/*
+ * Restore the user shadow stack pointer from a token on the shadow stack
+ * for task @tsk.
+ *
+ * The token at @shstk_ptr is read and cleared (presumably an atomic swap
+ * with 0 by amo_user_shstk() -- confirm its semantics) so it cannot be
+ * replayed. A valid token points exactly SHSTK_ENTRY_SIZE above its own
+ * slot; anything else is rejected.
+ *
+ * Returns 0 on success, -EFAULT if the user access failed, -EINVAL for an
+ * invalid token.
+ */
+int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
+{
+	unsigned long token = 0;
+
+	token = amo_user_shstk((unsigned long __user *)shstk_ptr, 0);
+
+	/* -1 from amo_user_shstk indicates the user access faulted */
+	if (token == -1)
+		return -EFAULT;
+
+	/* invalid token, return EINVAL */
+	if ((token - shstk_ptr) != SHSTK_ENTRY_SIZE) {
+		pr_info_ratelimited(
+			"%s[%d]: bad restore token in %s: pc=%p sp=%p, token=%p, shstk_ptr=%p\n",
+			tsk->comm, task_pid_nr(tsk), __func__,
+			(void *)(task_pt_regs(tsk)->epc), (void *)(task_pt_regs(tsk)->sp),
+			(void *)token, (void *)shstk_ptr);
+		return -EINVAL;
+	}
+
+	/* all checks passed, set active shstk and return success */
+	set_active_shstk(tsk, token);
+	return 0;
+}
+
static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
unsigned long token_offset,
bool set_tok)