diff mbox series

[v26,24/30] x86/cet/shstk: Introduce shadow stack token setup/verify routines

Message ID 20210427204315.24153-25-yu-cheng.yu@intel.com (mailing list archive)
State New, archived
Headers show
Series Control-flow Enforcement: Shadow Stack | expand

Commit Message

Yu-cheng Yu April 27, 2021, 8:43 p.m. UTC
A shadow stack restore token marks a restore point of the shadow stack, and
the address in a token must point directly above the token, which is within
the same shadow stack.  This is distinctly different from other pointers
on the shadow stack, since those pointers point to an executable code area.

The restore token can be used as an extra protection for signal handling.
To deliver a signal, create a shadow stack restore token and put the token
and the signal restorer address on the shadow stack.  In sigreturn, verify
the token and restore the shadow stack pointer from it.

Introduce token setup and verify routines.  Also introduce WRUSS, which is
a kernel-mode instruction but writes directly to user shadow stack.  It is
used to construct user signal stack as described above.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Cc: Kees Cook <keescook@chromium.org>
---
v25:
- Update inline assembly syntax, use %[].
- Change token address from (unsigned long) to (u64/u32 __user *).
- Change -EPERM to -EFAULT.

 arch/x86/include/asm/cet.h           |   9 ++
 arch/x86/include/asm/special_insns.h |  32 +++++++
 arch/x86/kernel/shstk.c              | 126 +++++++++++++++++++++++++++
 3 files changed, 167 insertions(+)

Comments

Borislav Petkov May 17, 2021, 7:45 a.m. UTC | #1
On Tue, Apr 27, 2021 at 01:43:09PM -0700, Yu-cheng Yu wrote:
> +static inline int write_user_shstk_32(u32 __user *addr, u32 val)
> +{
> +	WARN_ONCE(1, "%s used but not supported.\n", __func__);
> +	return -EFAULT;
> +}
> +#endif

What is that supposed to catch? Any concrete (mis-)use cases?

> +
> +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
> +{
> +	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
> +			  _ASM_EXTABLE(1b, %l[fail])
> +			  :: [addr] "r" (addr), [val] "r" (val)
> +			  :: fail);
> +	return 0;
> +fail:
> +	return -EFAULT;
> +}
> +#endif /* CONFIG_X86_SHADOW_STACK */
> +
>  #define nop() asm volatile ("nop")
>  
>  static inline void serialize(void)
> diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
> index d387df84b7f1..48a0c87414ef 100644
> --- a/arch/x86/kernel/shstk.c
> +++ b/arch/x86/kernel/shstk.c
> @@ -20,6 +20,7 @@
>  #include <asm/fpu/xstate.h>
>  #include <asm/fpu/types.h>
>  #include <asm/cet.h>
> +#include <asm/special_insns.h>
>  
>  static void start_update_msrs(void)
>  {
> @@ -176,3 +177,128 @@ void shstk_disable(void)
>  
>  	shstk_free(current);
>  }
> +
> +static unsigned long _get_user_shstk_addr(void)

What's the "_" prefix in the name supposed to denote?

Ditto for the other functions with "_" prefix you're adding.

> +{
> +	struct fpu *fpu = &current->thread.fpu;
> +	unsigned long ssp = 0;
> +
> +	fpregs_lock();
> +
> +	if (fpregs_state_valid(fpu, smp_processor_id())) {
> +		rdmsrl(MSR_IA32_PL3_SSP, ssp);
> +	} else {
> +		struct cet_user_state *p;
> +
> +		p = get_xsave_addr(&fpu->state.xsave, XFEATURE_CET_USER);
> +		if (p)
> +			ssp = p->user_ssp;
> +	}
> +
> +	fpregs_unlock();

<---- newline here.

> +	return ssp;
> +}
> +
> +#define TOKEN_MODE_MASK	3UL
> +#define TOKEN_MODE_64	1UL
> +#define IS_TOKEN_64(token) (((token) & TOKEN_MODE_MASK) == TOKEN_MODE_64)
> +#define IS_TOKEN_32(token) (((token) & TOKEN_MODE_MASK) == 0)

Why do you have to look at the second, busy bit, too in order to
determine the mode?

Also, you don't need most of those defines - see below.

> +/*
> + * Create a restore token on the shadow stack.  A token is always 8-byte
> + * and aligned to 8.
> + */
> +static int _create_rstor_token(bool ia32, unsigned long ssp,
> +			       unsigned long *token_addr)
> +{
> +	unsigned long addr;
> +
> +	*token_addr = 0;

What for? Callers should check this function's retval and then interpret
the validity of token_addr and it should not unconditionally write into
it.

> +
> +	if ((!ia32 && !IS_ALIGNED(ssp, 8)) || !IS_ALIGNED(ssp, 4))

Flip this logic:

	if ((ia32 && !IS_ALIGNED(ssp, 4)) || !IS_ALIGNED(ssp, 8))

> +		return -EINVAL;
> +
> +	addr = ALIGN_DOWN(ssp, 8) - 8;

Yah, so this is weird. Why does the restore token need to be at -8
instead on the shadow stack address itself?

Looking at

Figure 18-2. RSTORSSP to Switch to New Shadow Stack
Figure 18-3. SAVEPREVSSP to Save a Restore Point

in the SDM, it looks like unnecessarily more complex than it should be.
But maybe there's some magic I'm missing.

> +
> +	/* Is the token for 64-bit? */
> +	if (!ia32)
> +		ssp |= TOKEN_MODE_64;

		    |= BIT(0);

> +
> +	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
> +		return -EFAULT;
> +
> +	*token_addr = addr;

<---- newline here.

> +	return 0;
> +}
> +
> +/*
> + * Create a restore token on shadow stack, and then push the user-mode
> + * function return address.
> + */
> +int shstk_setup_rstor_token(bool ia32, unsigned long ret_addr,
> +			    unsigned long *token_addr, unsigned long *new_ssp)
> +{
> +	struct cet_status *cet = &current->thread.cet;
> +	unsigned long ssp = 0;
> +	int err = 0;

What are those cleared to 0 for?

> +
> +	if (cet->shstk_size) {

Flip logic to save an indentation level:

	if (!cet->shstk_size)
		return err;

	if (!ret_addr)
		...


> +		if (!ret_addr)
> +			return -EINVAL;
> +
> +		ssp = _get_user_shstk_addr();

Needs to test retval for 0 here and return error if so.

> +		err = _create_rstor_token(ia32, ssp, token_addr);
> +		if (err)
> +			return err;
> +
> +		if (ia32) {
> +			*new_ssp = *token_addr - sizeof(u32);
> +			err = write_user_shstk_32((u32 __user *)*new_ssp, (u32)ret_addr);
> +		} else {
> +			*new_ssp = *token_addr - sizeof(u64);
> +			err = write_user_shstk_64((u64 __user *)*new_ssp, (u64)ret_addr);

In both cases, you should write *new_ssp only when write_user_shstk_*
functions have succeeded.

> +		}
> +	}
> +
> +	return err;
> +}
> +
> +/*
> + * Verify token_addr point to a valid token, and then set *new_ssp

			points

> + * according to the token.
> + */
> +int shstk_check_rstor_token(bool ia32, unsigned long token_addr, unsigned long *new_ssp)
> +{
> +	unsigned long token;
> +
> +	*new_ssp = 0;

Same as above.

> +
> +	if (!IS_ALIGNED(token_addr, 8))
> +		return -EINVAL;
> +
> +	if (get_user(token, (unsigned long __user *)token_addr))
> +		return -EFAULT;
> +
> +	/* Is 64-bit mode flag correct? */
> +	if (!ia32 && !IS_TOKEN_64(token))
> +		return -EINVAL;
> +	else if (ia32 && !IS_TOKEN_32(token))
> +		return -EINVAL;

That test can be done using the XOR function - i.e., you want to return
an error value when the two things are different.

In order to make this more readable, you call ia32 "proc32" to be clear
what that variable denotes - a 32-bit process. Then, you do

	bool shstk32 = !(token & BIT(0));

	if (proc32 ^ shstk32)
		return -EINVAL;

Voila.

> +	token &= ~TOKEN_MODE_MASK;
> +
> +	/*
> +	 * Restore address properly aligned?
> +	 */
> +	if ((!ia32 && !IS_ALIGNED(token, 8)) || !IS_ALIGNED(token, 4))

Flip logic as above.
Yu-cheng Yu May 17, 2021, 8:55 p.m. UTC | #2
On 5/17/2021 12:45 AM, Borislav Petkov wrote:
> On Tue, Apr 27, 2021 at 01:43:09PM -0700, Yu-cheng Yu wrote:
>> +static inline int write_user_shstk_32(u32 __user *addr, u32 val)
>> +{
>> +	WARN_ONCE(1, "%s used but not supported.\n", __func__);
>> +	return -EFAULT;
>> +}
>> +#endif
> 
> What is that supposed to catch? Any concrete (mis-)use cases?
> 

If 32-bit apps are not supported, there should be no need for a 32-bit 
shadow stack write; otherwise there is a bug.

[...]

>> diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
>> index d387df84b7f1..48a0c87414ef 100644
>> --- a/arch/x86/kernel/shstk.c
>> +++ b/arch/x86/kernel/shstk.c
>> @@ -20,6 +20,7 @@
>>   #include <asm/fpu/xstate.h>
>>   #include <asm/fpu/types.h>
>>   #include <asm/cet.h>
>> +#include <asm/special_insns.h>
>>   
>>   static void start_update_msrs(void)
>>   {
>> @@ -176,3 +177,128 @@ void shstk_disable(void)
>>   
>>   	shstk_free(current);
>>   }
>> +
>> +static unsigned long _get_user_shstk_addr(void)
> 
> What's the "_" prefix in the name supposed to denote?
> 
> Ditto for the other functions with "_" prefix you're adding.
> 

These are static functions.  I thought that would make the static scope 
clear.  I can remove "_".

>> +{
>> +	struct fpu *fpu = &current->thread.fpu;
>> +	unsigned long ssp = 0;
>> +
>> +	fpregs_lock();
>> +
>> +	if (fpregs_state_valid(fpu, smp_processor_id())) {
>> +		rdmsrl(MSR_IA32_PL3_SSP, ssp);
>> +	} else {
>> +		struct cet_user_state *p;
>> +
>> +		p = get_xsave_addr(&fpu->state.xsave, XFEATURE_CET_USER);
>> +		if (p)
>> +			ssp = p->user_ssp;
>> +	}
>> +
>> +	fpregs_unlock();
> 
> <---- newline here.
> 
>> +	return ssp;
>> +}
>> +
>> +#define TOKEN_MODE_MASK	3UL
>> +#define TOKEN_MODE_64	1UL
>> +#define IS_TOKEN_64(token) (((token) & TOKEN_MODE_MASK) == TOKEN_MODE_64)
>> +#define IS_TOKEN_32(token) (((token) & TOKEN_MODE_MASK) == 0)
> 
> Why do you have to look at the second, busy bit, too in order to
> determine the mode?
> 

If the busy bit is set, it is only for SAVEPREVSSP, and invalid as a 
normal restore token.

> Also, you don't need most of those defines - see below.
> 
>> +/*
>> + * Create a restore token on the shadow stack.  A token is always 8-byte
>> + * and aligned to 8.
>> + */
>> +static int _create_rstor_token(bool ia32, unsigned long ssp,
>> +			       unsigned long *token_addr)
>> +{
>> +	unsigned long addr;
>> +
>> +	*token_addr = 0;
> 
> What for? Callers should check this function's retval and then interpret
> the validity of token_addr and it should not unconditionally write into
> it.
> 

Ok.

>> +
>> +	if ((!ia32 && !IS_ALIGNED(ssp, 8)) || !IS_ALIGNED(ssp, 4))
> 
> Flip this logic:
> 
> 	if ((ia32 && !IS_ALIGNED(ssp, 4)) || !IS_ALIGNED(ssp, 8))
> 
>> +		return -EINVAL;
>> +
>> +	addr = ALIGN_DOWN(ssp, 8) - 8;
> 
> Yah, so this is weird. Why does the restore token need to be at -8
> instead on the shadow stack address itself?

With the lower two bits masked out, the restore token must point 
directly above itself.

> 
> Looking at
> 
> Figure 18-2. RSTORSSP to Switch to New Shadow Stack
> Figure 18-3. SAVEPREVSSP to Save a Restore Point
> 
> in the SDM, it looks like unnecessarily more complex than it should be.
> But maybe there's some magic I'm missing.
> 
>> +
>> +	/* Is the token for 64-bit? */
>> +	if (!ia32)
>> +		ssp |= TOKEN_MODE_64;
> 
> 		    |= BIT(0);
> 

Ok, then, we don't use #define's.  I will put in comments about what it 
is doing, and fix the rest.

Thanks,
Yu-cheng
Eugene Syromiatnikov May 18, 2021, 12:14 a.m. UTC | #3
On Mon, May 17, 2021 at 01:55:01PM -0700, Yu, Yu-cheng wrote:
> On 5/17/2021 12:45 AM, Borislav Petkov wrote:
> >On Tue, Apr 27, 2021 at 01:43:09PM -0700, Yu-cheng Yu wrote:
> >>+static inline int write_user_shstk_32(u32 __user *addr, u32 val)
> >>+{
> >>+	WARN_ONCE(1, "%s used but not supported.\n", __func__);
> >>+	return -EFAULT;
> >>+}
> >>+#endif
> >
> >What is that supposed to catch? Any concrete (mis-)use cases?
> >
> 
> If 32-bit apps are not supported, there should be no need of 32-bit shadow
> stack write, otherwise there is a bug.

Speaking of which, I wonder what would happen if a 64-bit process makes
a 32-bit system call (using int 0x80, for example), and gets a signal.
Borislav Petkov May 18, 2021, 5:56 a.m. UTC | #4
On Mon, May 17, 2021 at 01:55:01PM -0700, Yu, Yu-cheng wrote:
> If 32-bit apps are not supported, there should be no need of 32-bit shadow
> stack write, otherwise there is a bug.

Aha, just a precaution. Then you can reduce the ifdeffery a bit (ontop
of yours):

---
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index f962da1fe9b5..5b48c91fa8d4 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -235,9 +235,14 @@ static inline void clwb(volatile void *__p)
 }
 
 #ifdef CONFIG_X86_SHADOW_STACK
-#if defined(CONFIG_IA32_EMULATION) || defined(CONFIG_X86_X32)
 static inline int write_user_shstk_32(u32 __user *addr, u32 val)
 {
+	if (WARN_ONCE(!IS_ENABLED(CONFIG_IA32_EMULATION) &&
+		      !IS_ENABLED(CONFIG_X86_X32),
+		      "%s used but not supported.\n", __func__)) {
+		return -EFAULT;
+	}
+
 	asm_volatile_goto("1: wrussd %[val], (%[addr])\n"
 			  _ASM_EXTABLE(1b, %l[fail])
 			  :: [addr] "r" (addr), [val] "r" (val)
@@ -246,13 +251,6 @@ static inline int write_user_shstk_32(u32 __user *addr, u32 val)
 fail:
 	return -EFAULT;
 }
-#else
-static inline int write_user_shstk_32(u32 __user *addr, u32 val)
-{
-	WARN_ONCE(1, "%s used but not supported.\n", __func__);
-	return -EFAULT;
-}
-#endif
 
 static inline int write_user_shstk_64(u64 __user *addr, u64 val)
 {

> These are static functions.  I thought that would make the static scope
> clear.  I can remove "_".

No, "_" or "__" prefixed functions are generally supposed to denote
internal interfaces which should not be used by other kernel facilities.
In that case you have the external api <function_name> and the lower
level helpers _<function_name>, __<function_name>, etc. They can be
static but not necessarily.

This is not the case here so you can simply drop the "_" prefixes.

> If the busy bit is set, it is only for SAVEPREVSSP, and invalid as a
> normal restore token.

Sure but the busy bit is independent from the mode.

> With the lower two bits masked out, the restore token must point
> directly above itself.

That I know - I'm just questioning the design. It should be

	addr = ALIGN_DOWN(ssp, 8);

Plain and simple.

Not this silly pushing and popping of stuff. But it is too late now
anyway and it's not like hw people talk to software people who get to
implement their shit.

> Ok, then, we don't use #define's. I will put in comments about what it
> is doing, and fix the rest.

Thx.
Borislav Petkov May 18, 2021, 5:58 p.m. UTC | #5
On Tue, May 18, 2021 at 02:14:14AM +0200, Eugene Syromiatnikov wrote:
> Speaking of which, I wonder what would happen if a 64-bit process makes
> a 32-bit system call (using int 0x80, for example), and gets a signal.

I guess that's the next patch. And I see amluto has some concerns...

/me goes read.
Yu-cheng Yu May 18, 2021, 6:05 p.m. UTC | #6
On 5/17/2021 5:14 PM, Eugene Syromiatnikov wrote:
> On Mon, May 17, 2021 at 01:55:01PM -0700, Yu, Yu-cheng wrote:
>> On 5/17/2021 12:45 AM, Borislav Petkov wrote:
>>> On Tue, Apr 27, 2021 at 01:43:09PM -0700, Yu-cheng Yu wrote:
>>>> +static inline int write_user_shstk_32(u32 __user *addr, u32 val)
>>>> +{
>>>> +	WARN_ONCE(1, "%s used but not supported.\n", __func__);
>>>> +	return -EFAULT;
>>>> +}
>>>> +#endif
>>>
>>> What is that supposed to catch? Any concrete (mis-)use cases?
>>>
>>
>> If 32-bit apps are not supported, there should be no need of 32-bit shadow
>> stack write, otherwise there is a bug.
> 
> Speaking of which, I wonder what would happen if a 64-bit process makes
> a 32-bit system call (using int 0x80, for example), and gets a signal.
> 

Yes, that's right.  Thanks!  I should have said, if neither IA32 nor X32 
is supported.

Yu-cheng
Yu-cheng Yu May 18, 2021, 7:45 p.m. UTC | #7
On 5/18/2021 10:58 AM, Borislav Petkov wrote:
> On Tue, May 18, 2021 at 02:14:14AM +0200, Eugene Syromiatnikov wrote:
>> Speaking of which, I wonder what would happen if a 64-bit process makes
>> a 32-bit system call (using int 0x80, for example), and gets a signal.
> 
> I guess that's the next patch. And I see amluto has some concerns...
> 
> /me goes read.
> 

In the next revision, there will be no "signal context extension" 
struct.  However, the flow for 64, ia32 and x32 will be similar.  I will 
send that out after some testing.

Thanks,
Yu-cheng
Yu-cheng Yu May 21, 2021, 4:17 p.m. UTC | #8
On 5/17/2021 12:45 AM, Borislav Petkov wrote:
> On Tue, Apr 27, 2021 at 01:43:09PM -0700, Yu-cheng Yu wrote:

[...]

>> +
>> +	if ((!ia32 && !IS_ALIGNED(ssp, 8)) || !IS_ALIGNED(ssp, 4))
> 
> Flip this logic:
> 
> 	if ((ia32 && !IS_ALIGNED(ssp, 4)) || !IS_ALIGNED(ssp, 8))

If !IS_ALIGNED(ssp, 4), then certainly !IS_ALIGNED(ssp, 8).
This has to be as-is, I think.

Thanks,
Yu-cheng
Borislav Petkov May 21, 2021, 6:40 p.m. UTC | #9
On Fri, May 21, 2021 at 09:17:24AM -0700, Yu, Yu-cheng wrote:
> If !IS_ALIGNED(ssp, 4), then certainly !IS_ALIGNED(ssp, 8).

... but the reverse is true: when it is aligned by 8, it is already
aligned by 4. Whoops, that's tricky. Pls put a comment over it so that
we don't forget.

Thx.
diff mbox series

Patch

diff --git a/arch/x86/include/asm/cet.h b/arch/x86/include/asm/cet.h
index 8b83ded577cc..ef6155213b7e 100644
--- a/arch/x86/include/asm/cet.h
+++ b/arch/x86/include/asm/cet.h
@@ -20,6 +20,10 @@  int shstk_setup_thread(struct task_struct *p, unsigned long clone_flags,
 		       unsigned long stack_size);
 void shstk_free(struct task_struct *p);
 void shstk_disable(void);
+int shstk_setup_rstor_token(bool ia32, unsigned long rstor,
+			    unsigned long *token_addr, unsigned long *new_ssp);
+int shstk_check_rstor_token(bool ia32, unsigned long token_addr,
+			    unsigned long *new_ssp);
 #else
 static inline int shstk_setup(void) { return 0; }
 static inline int shstk_setup_thread(struct task_struct *p,
@@ -27,6 +31,11 @@  static inline int shstk_setup_thread(struct task_struct *p,
 				     unsigned long stack_size) { return 0; }
 static inline void shstk_free(struct task_struct *p) {}
 static inline void shstk_disable(void) {}
+static inline int shstk_setup_rstor_token(bool ia32, unsigned long rstor,
+					  unsigned long *token_addr,
+					  unsigned long *new_ssp) { return 0; }
+static inline int shstk_check_rstor_token(bool ia32, unsigned long token_addr,
+					  unsigned long *new_ssp) { return 0; }
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 1d3cbaef4bb7..5a0488923cae 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,6 +234,38 @@  static inline void clwb(volatile void *__p)
 		: [pax] "a" (p));
 }
 
+#ifdef CONFIG_X86_SHADOW_STACK
+#if defined(CONFIG_IA32_EMULATION) || defined(CONFIG_X86_X32)
+static inline int write_user_shstk_32(u32 __user *addr, u32 val)
+{
+	asm_volatile_goto("1: wrussd %[val], (%[addr])\n"
+			  _ASM_EXTABLE(1b, %l[fail])
+			  :: [addr] "r" (addr), [val] "r" (val)
+			  :: fail);
+	return 0;
+fail:
+	return -EFAULT;
+}
+#else
+static inline int write_user_shstk_32(u32 __user *addr, u32 val)
+{
+	WARN_ONCE(1, "%s used but not supported.\n", __func__);
+	return -EFAULT;
+}
+#endif
+
+static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+{
+	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+			  _ASM_EXTABLE(1b, %l[fail])
+			  :: [addr] "r" (addr), [val] "r" (val)
+			  :: fail);
+	return 0;
+fail:
+	return -EFAULT;
+}
+#endif /* CONFIG_X86_SHADOW_STACK */
+
 #define nop() asm volatile ("nop")
 
 static inline void serialize(void)
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index d387df84b7f1..48a0c87414ef 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -20,6 +20,7 @@ 
 #include <asm/fpu/xstate.h>
 #include <asm/fpu/types.h>
 #include <asm/cet.h>
+#include <asm/special_insns.h>
 
 static void start_update_msrs(void)
 {
@@ -176,3 +177,128 @@  void shstk_disable(void)
 
 	shstk_free(current);
 }
+
+static unsigned long _get_user_shstk_addr(void)
+{
+	struct fpu *fpu = &current->thread.fpu;
+	unsigned long ssp = 0;
+
+	fpregs_lock();
+
+	if (fpregs_state_valid(fpu, smp_processor_id())) {
+		rdmsrl(MSR_IA32_PL3_SSP, ssp);
+	} else {
+		struct cet_user_state *p;
+
+		p = get_xsave_addr(&fpu->state.xsave, XFEATURE_CET_USER);
+		if (p)
+			ssp = p->user_ssp;
+	}
+
+	fpregs_unlock();
+	return ssp;
+}
+
+#define TOKEN_MODE_MASK	3UL
+#define TOKEN_MODE_64	1UL
+#define IS_TOKEN_64(token) (((token) & TOKEN_MODE_MASK) == TOKEN_MODE_64)
+#define IS_TOKEN_32(token) (((token) & TOKEN_MODE_MASK) == 0)
+
+/*
+ * Create a restore token on the shadow stack.  A token is always 8-byte
+ * and aligned to 8.
+ */
+static int _create_rstor_token(bool ia32, unsigned long ssp,
+			       unsigned long *token_addr)
+{
+	unsigned long addr;
+
+	*token_addr = 0;
+
+	if ((!ia32 && !IS_ALIGNED(ssp, 8)) || !IS_ALIGNED(ssp, 4))
+		return -EINVAL;
+
+	addr = ALIGN_DOWN(ssp, 8) - 8;
+
+	/* Is the token for 64-bit? */
+	if (!ia32)
+		ssp |= TOKEN_MODE_64;
+
+	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
+		return -EFAULT;
+
+	*token_addr = addr;
+	return 0;
+}
+
+/*
+ * Create a restore token on shadow stack, and then push the user-mode
+ * function return address.
+ */
+int shstk_setup_rstor_token(bool ia32, unsigned long ret_addr,
+			    unsigned long *token_addr, unsigned long *new_ssp)
+{
+	struct cet_status *cet = &current->thread.cet;
+	unsigned long ssp = 0;
+	int err = 0;
+
+	if (cet->shstk_size) {
+		if (!ret_addr)
+			return -EINVAL;
+
+		ssp = _get_user_shstk_addr();
+		err = _create_rstor_token(ia32, ssp, token_addr);
+		if (err)
+			return err;
+
+		if (ia32) {
+			*new_ssp = *token_addr - sizeof(u32);
+			err = write_user_shstk_32((u32 __user *)*new_ssp, (u32)ret_addr);
+		} else {
+			*new_ssp = *token_addr - sizeof(u64);
+			err = write_user_shstk_64((u64 __user *)*new_ssp, (u64)ret_addr);
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Verify token_addr point to a valid token, and then set *new_ssp
+ * according to the token.
+ */
+int shstk_check_rstor_token(bool ia32, unsigned long token_addr, unsigned long *new_ssp)
+{
+	unsigned long token;
+
+	*new_ssp = 0;
+
+	if (!IS_ALIGNED(token_addr, 8))
+		return -EINVAL;
+
+	if (get_user(token, (unsigned long __user *)token_addr))
+		return -EFAULT;
+
+	/* Is 64-bit mode flag correct? */
+	if (!ia32 && !IS_TOKEN_64(token))
+		return -EINVAL;
+	else if (ia32 && !IS_TOKEN_32(token))
+		return -EINVAL;
+
+	token &= ~TOKEN_MODE_MASK;
+
+	/*
+	 * Restore address properly aligned?
+	 */
+	if ((!ia32 && !IS_ALIGNED(token, 8)) || !IS_ALIGNED(token, 4))
+		return -EINVAL;
+
+	/*
+	 * Token was placed properly?
+	 */
+	if (((ALIGN_DOWN(token, 8) - 8) != token_addr) || token >= TASK_SIZE_MAX)
+		return -EINVAL;
+
+	*new_ssp = token;
+	return 0;
+}