--- a/arch/x86/include/asm/cet.h
+++ b/arch/x86/include/asm/cet.h
@@ -17,12 +17,14 @@ struct cet_status {
#ifdef CONFIG_X86_INTEL_CET
int cet_setup_shstk(void);
+int cet_setup_thread_shstk(struct task_struct *p);
void cet_disable_shstk(void);
void cet_disable_free_shstk(struct task_struct *p);
int cet_restore_signal(unsigned long ssp);
int cet_setup_signal(bool ia32, unsigned long rstor, unsigned long *new_ssp);
#else
static inline int cet_setup_shstk(void) { return 0; }
+static inline int cet_setup_thread_shstk(struct task_struct *p) { return 0; }
static inline void cet_disable_shstk(void) {}
static inline void cet_disable_free_shstk(struct task_struct *p) {}
static inline int cet_restore_signal(unsigned long ssp) { return 0; }
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+#include <asm/cet.h>
extern atomic64_t last_mm_ctx_id;
@@ -223,6 +224,8 @@ do { \
#else
#define deactivate_mm(tsk, mm) \
do { \
+ if (!tsk->vfork_done) \
+ cet_disable_free_shstk(tsk); \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
--- a/arch/x86/kernel/cet.c
+++ b/arch/x86/kernel/cet.c
@@ -134,6 +134,40 @@ int cet_setup_shstk(void)
return 0;
}
+int cet_setup_thread_shstk(struct task_struct *tsk)
+{
+ unsigned long addr, size;
+ struct cet_user_state *state;
+
+ if (!current->thread.cet.shstk_enabled)
+ return 0;
+
+ state = get_xsave_addr(&tsk->thread.fpu.state.xsave,
+ XFEATURE_MASK_SHSTK_USER);
+
+ if (!state)
+ return -EINVAL;
+
+ size = tsk->thread.cet.shstk_size;
+ if (size == 0)
+ size = rlimit(RLIMIT_STACK);
+
+ addr = do_mmap_locked(0, size, PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, VM_SHSTK);
+
+ if (addr >= TASK_SIZE_MAX) {
+ tsk->thread.cet.shstk_base = 0;
+ tsk->thread.cet.shstk_size = 0;
+ tsk->thread.cet.shstk_enabled = 0;
+ return -ENOMEM;
+ }
+
+ state->user_ssp = (u64)(addr + size - sizeof(u64));
+ tsk->thread.cet.shstk_base = addr;
+ tsk->thread.cet.shstk_size = size;
+ return 0;
+}
+
void cet_disable_shstk(void)
{
u64 r;
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -125,6 +125,7 @@ void exit_thread(struct task_struct *tsk)
free_vm86(t);
+ cet_disable_free_shstk(tsk);
fpu__drop(fpu);
}
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -317,6 +317,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
if (sp)
childregs->sp = sp;
+ /* Allocate a new shadow stack for pthread */
+ if ((clone_flags & (CLONE_VFORK | CLONE_VM)) == CLONE_VM) {
+ err = cet_setup_thread_shstk(p);
+ if (err)
+ goto out;
+ }
+
err = -ENOMEM;
if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
The shadow stack for clone/fork is handled as follows:

(1) If ((clone_flags & (CLONE_VFORK | CLONE_VM)) == CLONE_VM), the kernel
    allocates (and frees on thread exit) a new SHSTK for the child.

    It would be possible for the kernel to complete the clone syscall with
    the child's SHSTK pointer set to NULL and let the child thread allocate
    a SHSTK for itself, but there are two issues with that approach: it is
    not compatible with existing code that does inline syscalls, and it
    cannot handle signals that arrive before the child has successfully
    allocated a SHSTK.

(2) If (clone_flags & CLONE_VFORK), the child uses the existing SHSTK.

(3) In all other cases, the SHSTK is copied/reused whenever the parent or
    the child does a call/ret.

This patch handles cases (1) and (2). Case (3) is handled by the SHSTK
page-fault patches.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
---
 arch/x86/include/asm/cet.h         |  2 ++
 arch/x86/include/asm/mmu_context.h |  3 +++
 arch/x86/kernel/cet.c              | 34 ++++++++++++++++++++++++++++++
 arch/x86/kernel/process.c          |  1 +
 arch/x86/kernel/process_64.c       |  7 ++++++
 5 files changed, 47 insertions(+)
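To make the three cases concrete, here is a minimal userspace sketch
(illustrative only, not part of the patch) of which libc entry points land
in which case. The mapping follows from the clone flags glibc passes:
pthread_create() uses CLONE_VM without CLONE_VFORK, vfork() sets
CLONE_VFORK | CLONE_VM, and fork() sets neither.

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void *thread_fn(void *arg)
{
	/*
	 * Case (1): created with CLONE_VM but not CLONE_VFORK, so
	 * copy_thread_tls() allocated this thread its own shadow
	 * stack via cet_setup_thread_shstk().
	 */
	puts("pthread: running on its own SHSTK");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pid_t pid;

	pthread_create(&t, NULL, thread_fn, NULL);	/* case (1) */
	pthread_join(t, NULL);

	pid = vfork();					/* case (2) */
	if (pid == 0)
		_exit(0);	/* the child ran on the parent's SHSTK */
	waitpid(pid, NULL, 0);

	pid = fork();					/* case (3) */
	if (pid == 0)
		_exit(0);	/* SHSTK pages copied lazily on fault */
	waitpid(pid, NULL, 0);

	return 0;
}

The kernel-side decision point is the
(clone_flags & (CLONE_VFORK | CLONE_VM)) == CLONE_VM test added to
copy_thread_tls() above.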