
[RFT,v9,4/8] fork: Add shadow stack support to clone3()

Message ID 20240819-clone3-shadow-stack-v9-4-962d74f99464@kernel.org (mailing list archive)
State New
Series fork: Support shadow stacks in clone3()

Commit Message

Mark Brown Aug. 19, 2024, 7:24 p.m. UTC
Unlike with the normal stack there is no API for configuring the shadow
stack for a new thread; instead the kernel will dynamically allocate a new
shadow stack with the same size as the normal stack. This appears to be due
to the shadow stack series having been in development since before the more
extensible clone3() was added rather than anything more deliberate.

Add parameters to clone3() specifying the location and size of a shadow
stack for the newly created process.  If no shadow stack is specified
then the existing implicit allocation behaviour is maintained.

If a stack is specified then it is required to have an architecture
defined token placed on the stack; this will be consumed by the new
task.  If the token is not provided then this will be reported as a
segmentation fault with si_code SEGV_CPERR, as a runtime shadow stack
protection error would be.  This allows architectures to implement the
validation of the token in the child process context.

If the architecture does not support shadow stacks the shadow stack
parameters must be zero; architectures that do support the feature are
expected to enforce the same requirement on individual systems that lack
shadow stack support.

Update the existing x86 implementation to pay attention to the newly added
arguments; in order to maintain compatibility we use the existing behaviour
if no shadow stack is specified. Minimal validation is done of the supplied
parameters; detailed enforcement is left to when the thread is executed.
Since we are now using more fields from the kernel_clone_args we pass that
into the shadow stack code rather than individual fields.

At present this implementation does not consume the shadow stack token
atomically, as would be desirable; it uses a separate read and write.
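
As an illustration (not part of the patch itself), a minimal sketch of the
intended usage from userspace, assuming a shadow stack which already has a
token at its top - for instance one allocated with map_shadow_stack() and
SHADOW_STACK_SET_TOKEN.  A raw clone3() with CLONE_VM and a fresh stack
normally needs an assembly thunk for the child to land on; this only shows
how the new arguments are wired up:

    #define _GNU_SOURCE
    #include <linux/sched.h>   /* struct clone_args, CLONE_* */
    #include <signal.h>        /* SIGCHLD */
    #include <sys/syscall.h>   /* __NR_clone3 */
    #include <unistd.h>        /* syscall() */

    static pid_t clone3_with_shstk(void *stack, size_t stack_size,
                                   void *shstk, size_t shstk_size)
    {
            struct clone_args args = {
                    .flags             = CLONE_VM,
                    .exit_signal       = SIGCHLD,
                    .stack             = (__u64)(unsigned long)stack,
                    .stack_size        = stack_size,
                    /* New in this series: */
                    .shadow_stack      = (__u64)(unsigned long)shstk,
                    .shadow_stack_size = shstk_size,
            };

            return syscall(__NR_clone3, &args, sizeof(args));
    }

If the shadow stack fields are left zero the kernel keeps the existing
implicit allocation behaviour.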

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/x86/include/asm/shstk.h |  11 +++--
 arch/x86/kernel/process.c    |   2 +-
 arch/x86/kernel/shstk.c      | 103 +++++++++++++++++++++++++++++---------
 include/linux/sched/task.h   |  18 +++++++
 include/uapi/linux/sched.h   |  13 +++--
 kernel/fork.c                | 114 ++++++++++++++++++++++++++++++++++++++-----
 6 files changed, 219 insertions(+), 42 deletions(-)

Comments

Edgecombe, Rick P Aug. 20, 2024, 9:36 p.m. UTC | #1
On Mon, 2024-08-19 at 20:24 +0100, Mark Brown wrote:


[snip]

>  
> diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
> index 059685612362..42b2b18de20d 100644
> --- a/arch/x86/kernel/shstk.c
> +++ b/arch/x86/kernel/shstk.c
> @@ -191,44 +191,103 @@ void reset_thread_features(void)
>         current->thread.features_locked = 0;
>  }
>  
> -unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long
> clone_flags,
> -                                      unsigned long stack_size)
> +int arch_shstk_validate_clone(struct task_struct *t,
> +                             struct vm_area_struct *vma,
> +                             struct page *page,
> +                             struct kernel_clone_args *args)
> +{
> +       /*
> +        * SSP is aligned, so reserved bits and mode bit are a zero, just mark
> +        * the token 64-bit.
> +        */
> +       void *maddr = kmap_local_page(page);
> +       int offset;
> +       unsigned long addr, ssp;
> +       u64 expected;
> +       u64 val;
> +
> +       if (!features_enabled(ARCH_SHSTK_SHSTK))
> +               return 0;
> +
> +       ssp = args->shadow_stack + args->shadow_stack_size;
> +       addr = ssp - SS_FRAME_SIZE;
> +       expected = ssp | BIT(0);
> +       offset = offset_in_page(ssp);
> +
> +       /* This should really be an atomic cmpxchg.  It is not. */
> +       copy_from_user_page(vma, page, addr, &val, maddr + offset,
> +                           sizeof(val));

We're so close to the real cmpxchg at this point. I took a shot at it with the
diff at the end. I'm not sure if it might need some of the instrumentation
calls.

> +
> +       if (val != expected)
> +               return false;

Returning false from an int function will be 0 (i.e. success). I think it
might be covering up a bug. The gup happens at args->shadow_stack +
args->shadow_stack_size - 1 (the inclusive end), but the copy happens at the
exclusive end.

So shadow_stack_size = PAGE_SIZE will try to read the token at the start of the
shadow stack, but the failure will be reported as success. I think...
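
Concretely, as a hypothetical worked example with a page-aligned stack:
shadow_stack = 0x1000 and shadow_stack_size = 0x1000 (PAGE_SIZE) give

    ssp  = 0x2000, so offset_in_page(ssp) = 0
    addr = ssp - SS_FRAME_SIZE = 0x1ff8, where the token lives

The gup pins the page containing 0x1fff (the only page of the stack), but
the read then goes through maddr + 0 - the base of the shadow stack - rather
than maddr + 0xff8 where the token actually is.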

On another note, I think we need to verify that ssp is 8 byte aligned, or it
could be made to overflow into the adjacent direct map page by a few bytes.
At least I didn't see how it was prevented.
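
(E.g., hypothetically, an ssp 4 bytes below a page boundary gives an
offset_in_page() of PAGE_SIZE - 4, so the 8 byte access through maddr +
offset would run off the end of the kmap'd page.)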

> +       val = 0;
> +
> +       copy_to_user_page(vma, page, addr, maddr + offset, &val, sizeof(val));
> +       set_page_dirty_lock(page);
> +
> +       return 0;
> +}
> +
> 
[snip]
>  
> +static int shstk_validate_clone(struct task_struct *p,
> +                               struct kernel_clone_args *args)
> +{
> +       struct mm_struct *mm;
> +       struct vm_area_struct *vma;
> +       struct page *page;
> +       unsigned long addr;
> +       int ret;
> +
> +       if (!IS_ENABLED(CONFIG_ARCH_HAS_USER_SHADOW_STACK))
> +               return 0;
> +
> +       if (!args->shadow_stack)
> +               return 0;
> +
> +       mm = get_task_mm(p);
> +       if (!mm)
> +               return -EFAULT;
> +
> +       mmap_read_lock(mm);
> +
> +       /*
> +        * All current shadow stack architectures have tokens at the
> +        * top of a downward growing shadow stack.
> +        */
> +       addr = args->shadow_stack + args->shadow_stack_size - 1;
> +       addr = untagged_addr_remote(mm, addr);
> +
> +       page = get_user_page_vma_remote(mm, addr, FOLL_FORCE | FOLL_WRITE,
> +                                       &vma);
> +       if (IS_ERR(page)) {
> +               ret = -EFAULT;
> +               goto out;
> +       }
> +
> +       if (!(vma->vm_flags & VM_SHADOW_STACK)) {

Can we check VM_WRITE here too? At least on x86, shadow stacks can be
mprotect()ed as read-only. The reason for this I think originally fell out of
the implementation details, but all the same it would be nice to be
consistent. Then it should behave identically to a real shadow stack access.

> +               ret = -EFAULT;
> +               goto out_page;
> +       }
> +
> +       ret = arch_shstk_validate_clone(p, vma, page, args);
> +
> +out_page:
> +       put_page(page);
> +out:
> +       mmap_read_unlock(mm);
> +       mmput(mm);
> +       return ret;
> +}
> +
> 
[snip]
>  
> +/**
> + * clone3_shadow_stack_valid - check and prepare shadow stack
> + * @kargs: kernel clone args
> + *
> + * Verify that shadow stacks are only enabled if supported.
> + */
> +static inline bool clone3_shadow_stack_valid(struct kernel_clone_args *kargs)
> +{
> +       if (kargs->shadow_stack) {
> +               if (!kargs->shadow_stack_size)
> +                       return false;
> +
> +               if (kargs->shadow_stack_size < SHADOW_STACK_SIZE_MIN)
> +                       return false;
> +
> +               if (kargs->shadow_stack_size > rlimit(RLIMIT_STACK))
> +                       return false;

At the risk of asking a stupid question or one that I should have asked a long
time ago...

Why do we need both shadow_stack and shadow_stack_size? We are basically asking
it to consume a token at a pointer and have userspace manage the shadow stack
itself. So why does the kernel care what size it is? Couldn't we just have
'shadow_stack' alone, meaning 'consume a token here'?

> +
> +               /*
> +                * The architecture must check support on the specific
> +                * machine.
> +                */
> +               return IS_ENABLED(CONFIG_ARCH_HAS_USER_SHADOW_STACK);
> +       } else {
> +               return !kargs->shadow_stack_size;
> +       }
> +}
> +

Fixing some of the mentioned bugs, this on top passed the selftests for me. It
doesn't have the 8 byte alignment check I mentioned because I'm less sure
whether I'm just missing an existing one somewhere.

diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index 42b2b18de20d..2685180b8c5c 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -204,7 +204,6 @@ int arch_shstk_validate_clone(struct task_struct *t,
        int offset;
        unsigned long addr, ssp;
        u64 expected;
-       u64 val;
 
        if (!features_enabled(ARCH_SHSTK_SHSTK))
                return 0;
@@ -212,17 +211,12 @@ int arch_shstk_validate_clone(struct task_struct *t,
        ssp = args->shadow_stack + args->shadow_stack_size;
        addr = ssp - SS_FRAME_SIZE;
        expected = ssp | BIT(0);
-       offset = offset_in_page(ssp);
+       offset = offset_in_page(addr);
 
-       /* This should really be an atomic cmpxchg.  It is not. */
-       copy_from_user_page(vma, page, addr, &val, maddr + offset,
-                           sizeof(val));
+       if (!cmpxchg_to_user_page(vma, page, addr, (unsigned long *)(maddr +
offset),
+                                 expected, 0))
+               return -EINVAL;
 
-       if (val != expected)
-               return false;
-       val = 0;
-
-       copy_to_user_page(vma, page, addr, maddr + offset, &val, sizeof(val));
        set_page_dirty_lock(page);
 
        return 0;
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 7ee8a179d103..1500d49bc3f7 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -124,4 +124,15 @@ static inline void flush_cache_vunmap(unsigned long start,
unsigned long end)
        } while (0)
 #endif
 
+#ifndef cmpxchg_to_user_page
+#define cmpxchg_to_user_page(vma, page, vaddr, ptr, old, new)  \
+({                                                             \
+       bool ret;                                               \
+                                                               \
+       ret = try_cmpxchg(ptr, &old, new);                      \
+       flush_icache_user_page(vma, page, vaddr, sizeof(*ptr)); \
+       ret;                                                    \
+})
+#endif
+
 #endif /* _ASM_GENERIC_CACHEFLUSH_H */
Mark Brown Aug. 20, 2024, 11:34 p.m. UTC | #2
On Tue, Aug 20, 2024 at 09:36:46PM +0000, Edgecombe, Rick P wrote:
> On Mon, 2024-08-19 at 20:24 +0100, Mark Brown wrote:

> > +       /* This should really be an atomic cmpxchg.  It is not. */
> > +       copy_from_user_page(vma, page, addr, &val, maddr + offset,
> > +                           sizeof(val));

> We're so close to the real cmpxchg at this point. I took a shot at it with the
> diff at the end. I'm not sure if it might need some of the instrumentation
> calls.

Great - I hadn't been sure if there was any fun with access from kernel
mode on x86.  I can't get that patch to apply cleanly FWIW:

patching file arch/x86/kernel/shstk.c
Hunk #1 FAILED at 204.
patch: **** malformed patch at line 24: offset),

I think I got everything integrated correctly; I should have a version
out tomorrow with that folded in.

> > +
> > +       if (val != expected)
> > +               return false;

> Returning false from an int function will be 0 (i.e. success). I think it
> might be covering up a bug. The gup happens at args->shadow_stack +
> args->shadow_stack_size - 1 (the inclusive end), but the copy happens at the
> exclusive end.

Ah, yeah, thanks for noticing - that's cut'n'paste from the arm64 code
where the token check is in a separate function.

> > +       if (!(vma->vm_flags & VM_SHADOW_STACK)) {

> Can we check VM_WRITE here too? At least on x86, shadow stacks can be
> mprotect()ed as read-only. The reason for this I think originally fell out of
> the implementation details, but all the same it would be nice to be
> consistent. Then it should behave identically to a real shadow stack access.

Seems reasonable.

> > +               if (kargs->shadow_stack_size < SHADOW_STACK_SIZE_MIN)
> > +                       return false;

> > +               if (kargs->shadow_stack_size > rlimit(RLIMIT_STACK))
> > +                       return false;

> At the risk of asking a stupid question or one that I should have asked a long
> time ago...
> 
> Why do we need both shadow_stack and shadow_stack_size? We are basically asking
> it to consume a token at a pointer and have userspace manage the shadow stack
> itself. So why does the kernel care what size it is? Couldn't we just have
> 'shadow_stack' alone, meaning 'consume a token here'?

I was doing things this way for symmetry with how we specify the normal
stack.  That's a bit different since the kernel will actually use the
size for the normal stack, but it felt nicer to keep things looking
consistent; it saves users wondering why they work differently.  It's
also a bit of a help with portability given that arm64 expects to have a
top of stack marker above the token by default while x86 doesn't support
that.
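
To illustrate (a rough sketch of the default layouts as described here, for
a downward growing shadow stack with the top shown first):

    x86:                        arm64 (with top of stack marker):

    [ token ] <- top            [ marker (0) ] <- top
    [  ...  ]                   [ token      ]
                                [  ...       ]

so for the same base and size the token can sit one frame below the top on
arm64.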
Edgecombe, Rick P Aug. 20, 2024, 11:57 p.m. UTC | #3
On Wed, 2024-08-21 at 00:34 +0100, Mark Brown wrote:
> > Why do we need both shadow_stack and shadow_stack_size? We are basically
> > asking
> > it to consume a token at a pointer and have userspace manage the shadow
> > stack
> > itself. So why does the kernel care what size it is? Couldn't we just have
> > 'shadow_stack' alone, meaning 'consume a token here'?
> 
> I was doing things this way for symmetry with how we specify the normal
> stack.  That's a bit different since the kernel will actually use the
> size for the normal stack but it felt nicer to keep things looking
> consistent, it saves users wondering why they work differently.  It's
> also a bit of a help with portability given that arm64 expects to have a
> top of stack marker above the token by default while x86 doesn't support
> that.

Hmm, so then on arm the kernel would look for the token down a frame. Hmm. I
think it makes it even stranger ABI-wise.

SHADOW_STACK_SET_MARKER can be optional (not on arm today, but it could be in
the future). Then the mapping from shadow_stack_size to token offset would
depend on which historical, originally supported combination of
map_shadow_stack args was used.

BTW, just to try to reduce potential future revisions, what do you think about
the 8 byte alignment need? Did I miss the check somewhere?
Mark Brown Aug. 21, 2024, 12:19 a.m. UTC | #4
On Tue, Aug 20, 2024 at 11:57:23PM +0000, Edgecombe, Rick P wrote:
> On Wed, 2024-08-21 at 00:34 +0100, Mark Brown wrote:

> > I was doing things this way for symmetry with how we specify the normal
> > stack.  That's a bit different since the kernel will actually use the
> > size for the normal stack but it felt nicer to keep things looking
> > consistent, it saves users wondering why they work differently.  It's
> > also a bit of a help with portability given that arm64 expects to have a
> > top of stack marker above the token by default while x86 doesn't support
> > that.

> Hmm, so then on arm the kernel would look for the token down a frame. Hmm. I
> think it makes it even stranger ABI wise.

I think it's going to be strange one way or another: either you specify
a size that we don't currently really use, or you have two things both
called stacks which are described differently.  I suppose we could call
a single parameter shadow_stack_pointer?  Though I do note that, as you
indicated, we've been going for some time and this is the first time it
came up...

> SHADOW_STACK_SET_MARKER can be optional (not on arm today, but it could be in
> the future). Then the mapping from shadow_stack_size to token offset would
> depend on which historical, originally supported combination of
> map_shadow_stack args was used.

I called it _SET_TOKEN; it's optional on arm64 - we check both potential
locations for the token in clone3().

> BTW, just to try to reduce potential future revisions, what do you think about
> the 8 byte alignment need? Did I miss the check somewhere?

I've added a check that both the base address and size are sizeof(void *)
aligned.
Edgecombe, Rick P Aug. 21, 2024, 1:45 a.m. UTC | #5
On Wed, 2024-08-21 at 01:19 +0100, Mark Brown wrote:
> I think it's going to be strange one way or another, either you specify
> a size that we don't currently really use or you have two things both
> called stacks which are described differently.

I would guess users of raw clone3 calls would be able to handle that kind of
variation.

I was just trying to figure out why there is both the pointer and size for
normal stacks. It seems that one usage is that you don't have to worry about
whether your arch's stack grows up or down. But otherwise, the previous clones
didn't need the size. Before clone3 the stack size users seem to be kernel
threads, so when they unified the infrastructure behind kernel_clone_args,
stack_size was needed for the struct. Could it be that it just leaked to
userspace for that reason? I don't know, but I would think a tweak to such a
fundamental syscall should have some purposeful design behind it.

>   I suppose we could call
> a single parameter shadow_stack_pointer?  Though I do note that as you
> indicated we've been going for some time and this is the first time it
> came up...

Sorry for that. I looked through all the old threads expecting to find
discussion, but couldn't find an answer. Is clone3 support a dependency for arm
shadow stacks?
Mark Brown Aug. 21, 2024, 12:45 p.m. UTC | #6
On Wed, Aug 21, 2024 at 01:45:16AM +0000, Edgecombe, Rick P wrote:
> On Wed, 2024-08-21 at 01:19 +0100, Mark Brown wrote:

> > I think it's going to be strange one way or another, either you specify
> > a size that we don't currently really use or you have two things both
> > called stacks which are described differently.

> I would guess users of raw clone3 calls would be able to handle that kind of
> variation.

Oh, I'm sure people could cope either way - it's more a question of
clarity, and of not making people do needless investigations to try to
figure out what's going on, than anything else.

> I was just trying to figure out why there is both the pointer and size for
> normal stacks. It seems that one usage is that you don't have to worry about
> whether your arch's stack grows up or down. But otherwise, the previous clones
> didn't need the size. Before clone3 the stack size users seem to be kernel
> threads, so when they unified the infrastructure behind kernel_clone_args,
> stack_size was needed for the struct. Could it be that it just leaked to
> userspace for that reason? I don't know, but I would think a tweak to such a
> fundamental syscall should have some purposeful design behind it.

It's entirely possible it just leaked.  My own attempts to dig through
the archives haven't turned up anything on the subject either; it seems
to have been there from the get-go and just gone in without comment.
Equally it could just be that people felt that this was a more tasteful
way of specifying stacks, or that some future use was envisioned.

> >   I suppose we could call
> > a single parameter shadow_stack_pointer?  Though I do note that as you
> > indicated we've been going for some time and this is the first time it
> > came up...

> Sorry for that. I looked through all the old threads expecting to find
> discussion, but couldn't find an answer. Is clone3 support a dependency for arm
> shadow stacks?

Catalin didn't want to merge the arm64 support without clone3(), and
there are code dependencies as a result.  I could unpick it and reverse
the ordering so long as the arm64 maintainers are OK with that since the
overlap is in the implementation of copy_thread() and some of the
dependency patches.
Edgecombe, Rick P Aug. 21, 2024, 3:54 p.m. UTC | #7
On Wed, 2024-08-21 at 13:45 +0100, Mark Brown wrote:
> On Wed, Aug 21, 2024 at 01:45:16AM +0000, Edgecombe, Rick P wrote:
> > On Wed, 2024-08-21 at 01:19 +0100, Mark Brown wrote:
> 
> > > I think it's going to be strange one way or another, either you specify
> > > a size that we don't currently really use or you have two things both
> > > called stacks which are described differently.
> 
> > I would guess users of raw clone3 calls would be able to handle that kind of
> > variation.
> 
> Oh, I'm sure people could cope either way - it's more a question of
> clarity, and of not making people do needless investigations to try to
> figure out what's going on, than anything else.

Yes, it won't be a disaster either way.

> 
> > I was just trying to figure out why there is both the pointer and size for
> > normal stacks. It seems that one usage is that you don't have to worry about
> > whether your arch's stack grows up or down. But otherwise, the previous
> > clones
> > didn't need the size. Before clone3 the stack size users seem to be kernel
> > threads, so when they unified the infrastructure behind kernel_clone_args,
> > stack_size was needed for the struct. Could it be that it just leaked to
> > userspace for that reason? I don't know, but I would think a tweak to such a
> > fundamental syscall should have some purposeful design behind it.
> 
> It's entirely possible it just leaked.  My own attempts to dig through
> the archives haven't turned up anything on the subject either; it seems
> to have been there from the get-go and just gone in without comment.
> Equally it could just be that people felt that this was a more tasteful
> way of specifying stacks, or that some future use was envisioned.

Ok, well I'm suspicious, but won't object over it. The rest seems settled from
my side. I may try to attract some other x86 attention to that CMPXCHG helper,
but otherwise.

> 
> > >   I suppose we could call
> > > a single parameter shadow_stack_pointer?  Though I do note that as you
> > > indicated we've been going for some time and this is the first time it
> > > came up...
> 
> > Sorry for that. I looked through all the old threads expecting to find
> > discussion, but couldn't find an answer. Is clone3 support a dependency for
> > arm
> > shadow stacks?
> 
> Catalin didn't want to merge the arm64 support without clone3(), and
> there are code dependencies as a result.  I could unpick it and reverse
> the ordering so long as the arm64 maintainers are OK with that since the
> overlap is in the implementation of copy_thread() and some of the
> dependency patches.
Mark Brown Aug. 21, 2024, 5:23 p.m. UTC | #8
On Wed, Aug 21, 2024 at 03:54:49PM +0000, Edgecombe, Rick P wrote:
> On Wed, 2024-08-21 at 13:45 +0100, Mark Brown wrote:

> > It's entirely possible it just leaked.  My own attempts to dig through
> > the archives haven't turned up anything on the subject either; it seems
> > to have been there from the get-go and just gone in without comment.
> > Equally it could just be that people felt that this was a more tasteful
> > way of specifying stacks, or that some future use was envisioned.

> Ok, well I'm suspicious, but won't object over it. The rest seems settled from
> my side. I may try to attract some other x86 attention to that CMPXCHG helper,
> but otherwise.

OK, I'll post what I've got (with the current ABI) today, incorporating
your x86 fixes and the tighter validation and we can see what people
think.  Perhaps Christian remembers what's going on there?

> > > Sorry for that. I looked through all the old threads expecting to find
> > > discussion, but couldn't find an answer. Is clone3 support a dependency for
> > > arm
> > > shadow stacks?

> > Catalin didn't want to merge the arm64 support without clone3(), and
> > there are code dependencies as a result.  I could unpick it and reverse
> > the ordering so long as the arm64 maintainers are OK with that since the
> > overlap is in the implementation of copy_thread() and some of the
> > dependency patches.

Actually in an off-list discussion today Catalin indicated that he's
fine with relaxing that a little so I'm in the process of picking the
dependency apart.
Catalin Marinas Aug. 21, 2024, 6:05 p.m. UTC | #9
On Wed, Aug 21, 2024 at 06:23:18PM +0100, Mark Brown wrote:
> On Wed, Aug 21, 2024 at 03:54:49PM +0000, Edgecombe, Rick P wrote:
> > On Wed, 2024-08-21 at 13:45 +0100, Mark Brown wrote:
> > > > Sorry for that. I looked through all the old threads expecting to find
> > > > discussion, but couldn't find an answer. Is clone3 support a dependency for
> > > > arm shadow stacks?
> 
> > > Catalin didn't want to merge the arm64 support without clone3(), and
> > > there are code dependencies as a result.  I could unpick it and reverse
> > > the ordering so long as the arm64 maintainers are OK with that since the
> > > overlap is in the implementation of copy_thread() and some of the
> > > dependency patches.
> 
> Actually in an off-list discussion today Catalin indicated that he's
> fine with relaxing that a little so I'm in the process of picking the
> dependency apart.

Just to confirm, I'd rather get the clone3() ABI choices properly
debated than rushing it. It seems that our libc support does not rely on
clone3() yet, so let's continue with the arm64 series independently of
this one (only clone() with default shadow stack allocation). We'll
follow up with the clone3() support that covers both architectures.

Thanks and sorry for the confusion. I did not realise the complications
of adding clone3() support.
Christian Brauner Sept. 27, 2024, 8:50 a.m. UTC | #10
On Wed, Aug 21, 2024 at 06:23:18PM GMT, Mark Brown wrote:
> On Wed, Aug 21, 2024 at 03:54:49PM +0000, Edgecombe, Rick P wrote:
> > On Wed, 2024-08-21 at 13:45 +0100, Mark Brown wrote:
> 
> > > It's entirely possible it just leaked.  My own attempts to dig through
> > > the archives haven't turned up anything on the subject either; it seems
> > > to have been there from the get-go and just gone in without comment.
> > > Equally it could just be that people felt that this was a more tasteful
> > > way of specifying stacks, or that some future use was envisioned.
> 
> > Ok, well I'm suspicious, but won't object over it. The rest seems settled from
> > my side. I may try to attract some other x86 attention to that CMPXCHG helper,
> > but otherwise.
> 
> OK, I'll post what I've got (with the current ABI) today, incorporating
> your x86 fixes and the tighter validation and we can see what people
> think.  Perhaps Christian remembers what's going on there?

The legacy clone system call had required userspace to know in which
direction the stack was growing and then pass down the stack pointer
appropriately (e.g., parisc grows upwards).

And in fact, the old clone() system call did take an additional
stack_size argument on specific architectures. For example, on
microblaze.

Also, when clone3() was done we still had ia64 in the tree which had a
separate clone2() system call that also required a stack_size argument.

So userspace ended up with code like this or worse:

     #define __STACK_SIZE (8 * 1024 * 1024)
     pid_t sys_clone(int (*fn)(void *), void *arg, int flags, int *pidfd)
     {
             pid_t ret;
             void *stack;

             stack = malloc(__STACK_SIZE);
             if (!stack)
                     return -ENOMEM;

     #ifdef __ia64__
             ret = __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, pidfd);
     #elif defined(__parisc__) /* stack grows up */
             ret = clone(fn, stack, flags | SIGCHLD, arg, pidfd);
     #else
             ret = clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, pidfd);
     #endif
             return ret;
     }

So we talked to the glibc folks which preferred the kernel to do all
this nonsense for them as it has that knowledge.

My preference is to keep the api consistent and require a stack_size for
shadow stacks as well.
Edgecombe, Rick P Sept. 27, 2024, 3:21 p.m. UTC | #11
On Fri, 2024-09-27 at 10:50 +0200, Christian Brauner wrote:
> The legacy clone system call had required userspace to know in which
> direction the stack was growing and then pass down the stack pointer
> appropriately (e.g., parisc grows upwards).
> 
> And in fact, the old clone() system call did take an additional
> stack_size argument on specific architectures. For example, on
> microblaze.
> 
> Also, when clone3() was done we still had ia64 in the tree which had a
> separate clone2() system call that also required a stack_size argument.
> 
> So userspace ended up with code like this or worse:
> 
>      #define __STACK_SIZE (8 * 1024 * 1024)
>      pid_t sys_clone(int (*fn)(void *), void *arg, int flags, int *pidfd)
>      {
>              pid_t ret;
>              void *stack;
> 
>              stack = malloc(__STACK_SIZE);
>              if (!stack)
>                      return -ENOMEM;
> 
>      #ifdef __ia64__
>              ret = __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, pidfd);
>      #elif defined(__parisc__) /* stack grows up */
>              ret = clone(fn, stack, flags | SIGCHLD, arg, pidfd);
>      #else
>              ret = clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, pidfd);
>      #endif
>              return ret;
>      }
> 
> So we talked to the glibc folks which preferred the kernel to do all
> this nonsense for them as it has that knowledge.

Thanks for the info!

> 
> My preference is to keep the api consistent and require a stack_size for
> shadow stacks as well.

Did you catch that a token can be at different offsets on the stack
depending on args passed to map_shadow_stack? So userspace will need something
like the code above, but that adjusts the 'shadow_stack_size' such that the
kernel looks for the token in the right place. It will be even weirder if
someone uses clone3 to switch to a stack that has already been used, and pivoted
off of, such that a token was left in the middle of the stack. In that case
userspace would have to come up with args disconnected from the actual size of
the shadow stack such that the kernel would be cajoled into looking for the
token in the right place.

A shadow stack size is more symmetric on the surface, but I'm not sure it will
be easier for userspace to handle. So I think we should just have a pointer to
the token. But it will be a usable implementation either way.
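
(For comparison, a purely hypothetical sketch of that alternative - not
something proposed in this series - where the struct grows a single field:

    struct clone_args {
            ...
            __aligned_u64 shadow_stack_token;  /* address of token to consume */
    };

and the kernel consumes the token at that address without needing a size at
all.)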
Christian Brauner Oct. 1, 2024, 3:12 p.m. UTC | #12
On Fri, Sep 27, 2024 at 03:21:59PM GMT, Edgecombe, Rick P wrote:
> On Fri, 2024-09-27 at 10:50 +0200, Christian Brauner wrote:
> > The legacy clone system call had required userspace to know in which
> > direction the stack was growing and then pass down the stack pointer
> > appropriately (e.g., parisc grows upwards).
> > 
> > And in fact, the old clone() system call did take an additional
> > stack_size argument on specific architectures. For example, on
> > microblaze.
> > 
> > Also, when clone3() was done we still had ia64 in the tree which had a
> > separate clone2() system call that also required a stack_size argument.
> > 
> > So userspace ended up with code like this or worse:
> > 
> >      #define __STACK_SIZE (8 * 1024 * 1024)
> >      pid_t sys_clone(int (*fn)(void *), void *arg, int flags, int *pidfd)
> >      {
> >              pid_t ret;
> >              void *stack;
> > 
> >              stack = malloc(__STACK_SIZE);
> >              if (!stack)
> >                      return -ENOMEM;
> > 
> >      #ifdef __ia64__
> >              ret = __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, pidfd);
> >      #elif defined(__parisc__) /* stack grows up */
> >              ret = clone(fn, stack, flags | SIGCHLD, arg, pidfd);
> >      #else
> >              ret = clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, pidfd);
> >      #endif
> >              return ret;
> >      }
> > 
> > So we talked to the glibc folks which preferred the kernel to do all
> > this nonsense for them as it has that knowledge.
> 
> Thanks for the info!
> 
> > 
> > My preference is to keep the api consistent and require a stack_size for
> > shadow stacks as well.
> 
> Did you catch that a token can be at different offsets on the stack
> depending on args passed to map_shadow_stack? So userspace will need something
> like the code above, but that adjusts the 'shadow_stack_size' such that the
> kernel looks for the token in the right place. It will be even weirder if
> someone uses clone3 to switch to a stack that has already been used, and pivoted
> off of, such that a token was left in the middle of the stack. In that case
> userspace would have to come up with args disconnected from the actual size of
> the shadow stack such that the kernel would be cajoled into looking for the
> token in the right place.
> 
> A shadow stack size is more symmetric on the surface, but I'm not sure it will
> be easier for userspace to handle. So I think we should just have a pointer to
> the token. But it will be a usable implementation either way.

Maybe it's best to let glibc folks decide what is better/more ergonomic for them.
Mark Brown Oct. 1, 2024, 5:33 p.m. UTC | #13
On Tue, Oct 01, 2024 at 05:12:38PM +0200, Christian Brauner wrote:
> On Fri, Sep 27, 2024 at 03:21:59PM GMT, Edgecombe, Rick P wrote:

> > Did you catch that a token can be at different offsets on the stack
> > depending on args passed to map_shadow_stack? So userspace will need something
> > like the code above, but that adjusts the 'shadow_stack_size' such that the
> > kernel looks for the token in the right place. It will be even weirder if
> > someone uses clone3 to switch to a stack that has already been used, and pivoted
> > off of, such that a token was left in the middle of the stack. In that case
> > userspace would have to come up with args disconnected from the actual size of
> > the shadow stack such that the kernel would be cajoled into looking for the
> > token in the right place.
> > 
> > A shadow stack size is more symmetric on the surface, but I'm not sure it will
> > be easier for userspace to handle. So I think we should just have a pointer to
> > the token. But it will be a usable implementation either way.

My suspicion would be that if we're doing the pivot to a previously used
shadow stack we'd also be pivoting the regular stack along with it, which
would face similar issues with having an unusual method for specifying
the stack top, so I don't know how much we're really winning.  Like we
both keep saying, either of the interfaces works though; it's just a
taste question with both having downsides.

> Maybe it's best to let glibc folks decide what is better/more ergonomic for them.

The relevant people are on the thread, I think.

I've rebased onto v6.12-rc1, assuming I don't notice anything horrible
in testing I'll post that with the ABI unchanged for now.
Edgecombe, Rick P Oct. 1, 2024, 11:03 p.m. UTC | #14
On Tue, 2024-10-01 at 18:33 +0100, Mark Brown wrote:
> > > A shadow stack size is more symmetric on the surface, but I'm not sure it
> > > will
> > > be easier for userspace to handle. So I think we should just have a
> > > pointer to
> > > the token. But it will be a usable implementation either way.
> 
> My suspicion would be that if we're doing the pivot to a previously used
> shadow stack we'd also be pivoting the regular stack along with it which
> would face similar issues with having an unusual method for specifying
> the stack top so I don't know how much we're really winning.

I'm not so sure. The thing is a regular stack can be re-used in full - just set
the RSP to the end and take advantage of the whole stack. A shadow stack can
only be used where there is a token.

>   Like we
> both keep saying either of the interfaces works though, it's just a
> taste question with both having downsides.

Fair enough.
Mark Brown Oct. 2, 2024, 1:42 p.m. UTC | #15
On Tue, Oct 01, 2024 at 11:03:10PM +0000, Edgecombe, Rick P wrote:
> On Tue, 2024-10-01 at 18:33 +0100, Mark Brown wrote:

> > My suspicion would be that if we're doing the pivot to a previously used
> > shadow stack we'd also be pivoting the regular stack along with it which
> > would face similar issues with having an unusual method for specifying
> > the stack top so I don't know how much we're really winning.

> I'm not so sure. The thing is a regular stack can be re-used in full - just set
> the RSP to the end and take advantage of the whole stack. A shadow stack can
> only be used where there is a token.

Yeah, I'm not sure how appealing it is trying to use a memory pool of
shadow stacks - like you say you can't reset the top of the stack so
you need to keep track of that when the stack becomes unused.  If the
users don't leave the SSP at the top of the stack then, unless writes
have been enabled (which has security issues), the size of the shadow
stacks will gradually be eroded, which will need to be managed.  You
could do it, but it's clearly not really how things are supposed to
work.  The use case of starting a new worker thread for an existing
in-use state seems much more appealing.
Mark Brown Oct. 2, 2024, 9:01 p.m. UTC | #16
On Wed, Oct 02, 2024 at 02:42:58PM +0100, Mark Brown wrote:
> On Tue, Oct 01, 2024 at 11:03:10PM +0000, Edgecombe, Rick P wrote:

> > I'm not so sure. The thing is a regular stack can be re-used in full - just set
> > the RSP to the end and take advantage of the whole stack. A shadow stack can
> > only be used where there is a token.

> Yeah, I'm not sure how appealing it is trying to use a memory pool of
> shadow stacks - like you say you can't reset the top of the stack so
> you need to keep track of that when the stack becomes unused.  If the
> users don't leave the SSP at the top of the stack then, unless writes
> have been enabled (which has security issues), the size of the shadow
> stacks will gradually be eroded, which will need to be managed.  You
> could do it, but it's clearly not really how things are supposed to
> work.  The use case of starting a new worker thread for an existing
> in-use state seems much more appealing.

BTW it's probably also worth noting that at least on arm64 (perhaps x86
is different here?) the shadow stack of a thread that exited won't have
a token placed on it so it won't be possible to use it with clone3() at
all unless another token is written.  To get a shadow stack you could
use with clone3() you'd either need to allocate a new one, pivot away
from one that's currently in use or enable shadow stack writes and place
a token.
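
For example (illustrative - flag names as discussed in this thread), a
shadow stack which clone3() can consume could be obtained with:

    /* size bytes, with a restore token placed at the top */
    addr = syscall(__NR_map_shadow_stack, 0, size, SHADOW_STACK_SET_TOKEN);

with SHADOW_STACK_SET_MARKER additionally placing the top of stack marker
above the token on arm64.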
Edgecombe, Rick P Oct. 2, 2024, 9:25 p.m. UTC | #17
On Wed, 2024-10-02 at 22:01 +0100, Mark Brown wrote:
> BTW it's probably also worth noting that at least on arm64 (perhaps x86
> is different here?) the shadow stack of a thread that exited won't have
> a token placed on it so it won't be possible to use it with clone3() at
> all unless another token is written.  To get a shadow stack you could
> use with clone3() you'd either need to allocate a new one, pivot away
> from one that's currently in use or enable shadow stack writes and place
> a token.

Hmm, yea. I didn't have a specific idea in mind. But yea, you would have to
switch to something in order to leave a token.

If you enabled WRSS (or similar) you might be able to reuse shadow stacks in
some kind of useful way, but in that case you would probably WRSS the token to
the end of the shadow stack and the start+size would fit better.
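
(Hypothetically - x86-64 only, with shadow stack writes enabled via
ARCH_SHSTK_WRSS - placing a token by hand would look something like:

    /* write a restore token at ssp - 8; token value is ssp | 64-bit bit */
    unsigned long *slot = (unsigned long *)ssp - 1;
    asm volatile("wrssq %0, (%1)" : : "r"(ssp | 1UL), "r"(slot) : "memory");
)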
Yury Khrustalev Oct. 3, 2024, 4:05 p.m. UTC | #18
On Tue, Oct 01, 2024 at 05:12:38PM +0200, Christian Brauner wrote:
> > Thanks for the info!
> > 
> > > 
> > > My preference is to keep the api consistent and require a stack_size for
> > > shadow stacks as well.
> > 
> > Did you catch that a token can be at different offsets on the stack
> > depending on args passed to map_shadow_stack? So userspace will need something
> > like the code above, but that adjusts the 'shadow_stack_size' such that the
> > kernel looks for the token in the right place. It will be even weirder if
> > someone uses clone3 to switch to a stack that has already been used, and pivoted
> > off of, such that a token was left in the middle of the stack. In that case
> > userspace would have to come up with args disconnected from the actual size of
> > the shadow stack such that the kernel would be cajoled into looking for the
> > token in the right place.
> > 
> > A shadow stack size is more symmetric on the surface, but I'm not sure it will
> > be easier for userspace to handle. So I think we should just have a pointer to
> > the token. But it will be a usable implementation either way.
> 
> Maybe it's best to let glibc folks decide what is better/more ergonomic for them.

I agree that it would be better to just have a pointer to the token.

My preference would be to avoid having obscure additional arguments that may
end up having a misleading name or bearing some hidden functionality. If the
kernel is not going to use the stack size as such, then users should not have
to provide it.

Thanks,
Yury

PS Apologies for the delayed reply

Patch

diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h
index 4cb77e004615..252feeda6999 100644
--- a/arch/x86/include/asm/shstk.h
+++ b/arch/x86/include/asm/shstk.h
@@ -6,6 +6,7 @@ 
 #include <linux/types.h>
 
 struct task_struct;
+struct kernel_clone_args;
 struct ksignal;
 
 #ifdef CONFIG_X86_USER_SHADOW_STACK
@@ -16,8 +17,8 @@  struct thread_shstk {
 
 long shstk_prctl(struct task_struct *task, int option, unsigned long arg2);
 void reset_thread_features(void);
-unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clone_flags,
-				       unsigned long stack_size);
+unsigned long shstk_alloc_thread_stack(struct task_struct *p,
+				       const struct kernel_clone_args *args);
 void shstk_free(struct task_struct *p);
 int setup_signal_shadow_stack(struct ksignal *ksig);
 int restore_signal_shadow_stack(void);
@@ -28,8 +29,10 @@  static inline long shstk_prctl(struct task_struct *task, int option,
 			       unsigned long arg2) { return -EINVAL; }
 static inline void reset_thread_features(void) {}
 static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p,
-						     unsigned long clone_flags,
-						     unsigned long stack_size) { return 0; }
+						     const struct kernel_clone_args *args)
+{
+	return 0;
+}
 static inline void shstk_free(struct task_struct *p) {}
 static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; }
 static inline int restore_signal_shadow_stack(void) { return 0; }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f63f8fd00a91..59456ab8d93f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -207,7 +207,7 @@  int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	 * is disabled, new_ssp will remain 0, and fpu_clone() will know not to
 	 * update it.
 	 */
-	new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size);
+	new_ssp = shstk_alloc_thread_stack(p, args);
 	if (IS_ERR_VALUE(new_ssp))
 		return PTR_ERR((void *)new_ssp);
 
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index 059685612362..42b2b18de20d 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -191,44 +191,103 @@  void reset_thread_features(void)
 	current->thread.features_locked = 0;
 }
 
-unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
-				       unsigned long stack_size)
+int arch_shstk_validate_clone(struct task_struct *t,
+			      struct vm_area_struct *vma,
+			      struct page *page,
+			      struct kernel_clone_args *args)
+{
+	/*
+	 * SSP is aligned, so reserved bits and mode bit are a zero, just mark
+	 * the token 64-bit.
+	 */
+	void *maddr = kmap_local_page(page);
+	int offset;
+	unsigned long addr, ssp;
+	u64 expected;
+	u64 val;
+
+	if (!features_enabled(ARCH_SHSTK_SHSTK))
+		return 0;
+
+	ssp = args->shadow_stack + args->shadow_stack_size;
+	addr = ssp - SS_FRAME_SIZE;
+	expected = ssp | BIT(0);
+	offset = offset_in_page(ssp);
+
+	/* This should really be an atomic cmpxchg.  It is not. */
+	copy_from_user_page(vma, page, addr, &val, maddr + offset,
+			    sizeof(val));
+
+	if (val != expected)
+		return false;
+	val = 0;
+
+	copy_to_user_page(vma, page, addr, maddr + offset, &val, sizeof(val));
+	set_page_dirty_lock(page);
+
+	return 0;
+}
+
+unsigned long shstk_alloc_thread_stack(struct task_struct *tsk,
+				       const struct kernel_clone_args *args)
 {
 	struct thread_shstk *shstk = &tsk->thread.shstk;
+	unsigned long clone_flags = args->flags;
 	unsigned long addr, size;
 
 	/*
 	 * If shadow stack is not enabled on the new thread, skip any
-	 * switch to a new shadow stack.
+	 * implicit switch to a new shadow stack and reject attempts to
+	 * explicitly specify one.
 	 */
-	if (!features_enabled(ARCH_SHSTK_SHSTK))
+	if (!features_enabled(ARCH_SHSTK_SHSTK)) {
+		if (args->shadow_stack || args->shadow_stack_size)
+			return (unsigned long)ERR_PTR(-EINVAL);
+
 		return 0;
+	}
 
 	/*
-	 * For CLONE_VFORK the child will share the parents shadow stack.
-	 * Make sure to clear the internal tracking of the thread shadow
-	 * stack so the freeing logic run for child knows to leave it alone.
+	 * If the user specified a shadow stack then do some basic
+	 * validation and use it, otherwise fall back to a default
+	 * shadow stack size if the clone_flags don't indicate an
+	 * allocation is unneeded.
 	 */
-	if (clone_flags & CLONE_VFORK) {
+	if (args->shadow_stack) {
+		addr = args->shadow_stack;
+		size = args->shadow_stack_size;
 		shstk->base = 0;
 		shstk->size = 0;
-		return 0;
-	}
+	} else {
+		/*
+		 * For CLONE_VFORK the child will share the parents
+		 * shadow stack.  Make sure to clear the internal
+		 * tracking of the thread shadow stack so the freeing
+		 * logic run for child knows to leave it alone.
+		 */
+		if (clone_flags & CLONE_VFORK) {
+			shstk->base = 0;
+			shstk->size = 0;
+			return 0;
+		}
 
-	/*
-	 * For !CLONE_VM the child will use a copy of the parents shadow
-	 * stack.
-	 */
-	if (!(clone_flags & CLONE_VM))
-		return 0;
+		/*
+		 * For !CLONE_VM the child will use a copy of the
+		 * parents shadow stack.
+		 */
+		if (!(clone_flags & CLONE_VM))
+			return 0;
 
-	size = adjust_shstk_size(stack_size);
-	addr = alloc_shstk(0, size, 0, false);
-	if (IS_ERR_VALUE(addr))
-		return addr;
+		size = args->stack_size;
+		size = adjust_shstk_size(size);
+		addr = alloc_shstk(0, size, 0, false);
+		if (IS_ERR_VALUE(addr))
+			return addr;
 
-	shstk->base = addr;
-	shstk->size = size;
+		/* We allocated the shadow stack, we should deallocate it. */
+		shstk->base = addr;
+		shstk->size = size;
+	}
 
 	return addr + size;
 }
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index d362aacf9f89..c818efdd57af 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -16,6 +16,7 @@  struct task_struct;
 struct rusage;
 union thread_union;
 struct css_set;
+struct vm_area_struct;
 
 /* All the bits taken by the old clone syscall. */
 #define CLONE_LEGACY_FLAGS 0xffffffffULL
@@ -43,6 +44,8 @@  struct kernel_clone_args {
 	void *fn_arg;
 	struct cgroup *cgrp;
 	struct css_set *cset;
+	unsigned long shadow_stack;
+	unsigned long shadow_stack_size;
 };
 
 /*
@@ -230,4 +233,19 @@  static inline void task_unlock(struct task_struct *p)
 
 DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
 
+#ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
+int arch_shstk_validate_clone(struct task_struct *p,
+			      struct vm_area_struct *vma,
+			      struct page *page,
+			      struct kernel_clone_args *args);
+#else
+static inline int arch_shstk_validate_clone(struct task_struct *p,
+					    struct vm_area_struct *vma,
+					    struct page *page,
+					    struct kernel_clone_args *args)
+{
+	return 0;
+}
+#endif
+
 #endif /* _LINUX_SCHED_TASK_H */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 3bac0a8ceab2..8b7af52548fd 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -84,6 +84,10 @@ 
  *                kernel's limit of nested PID namespaces.
  * @cgroup:       If CLONE_INTO_CGROUP is specified set this to
  *                a file descriptor for the cgroup.
+ * @shadow_stack: Pointer to the memory allocated for the child
+ *                shadow stack.
+ * @shadow_stack_size: Specify the size of the shadow stack for
+ *                     the child process.
  *
  * The structure is versioned by size and thus extensible.
  * New struct members must go at the end of the struct and
@@ -101,12 +105,15 @@  struct clone_args {
 	__aligned_u64 set_tid;
 	__aligned_u64 set_tid_size;
 	__aligned_u64 cgroup;
+	__aligned_u64 shadow_stack;
+	__aligned_u64 shadow_stack_size;
 };
 #endif
 
-#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
-#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
-#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
+#define CLONE_ARGS_SIZE_VER0  64 /* sizeof first published struct */
+#define CLONE_ARGS_SIZE_VER1  80 /* sizeof second published struct */
+#define CLONE_ARGS_SIZE_VER2  88 /* sizeof third published struct */
+#define CLONE_ARGS_SIZE_VER3 104 /* sizeof fourth published struct */
 
 /*
  * Scheduling policies
diff --git a/kernel/fork.c b/kernel/fork.c
index cc760491f201..275d8cf3f66b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -128,6 +128,11 @@ 
  */
 #define MAX_THREADS FUTEX_TID_MASK
 
+/*
+ * Require that shadow stacks can store at least one element
+ */
+#define SHADOW_STACK_SIZE_MIN sizeof(void *)
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -2107,6 +2112,56 @@  static void rv_task_fork(struct task_struct *p)
 #define rv_task_fork(p) do {} while (0)
 #endif
 
+static int shstk_validate_clone(struct task_struct *p,
+				struct kernel_clone_args *args)
+{
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	struct page *page;
+	unsigned long addr;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_USER_SHADOW_STACK))
+		return 0;
+
+	if (!args->shadow_stack)
+		return 0;
+
+	mm = get_task_mm(p);
+	if (!mm)
+		return -EFAULT;
+
+	mmap_read_lock(mm);
+
+	/*
+	 * All current shadow stack architectures have tokens at the
+	 * top of a downward growing shadow stack.
+	 */
+	addr = args->shadow_stack + args->shadow_stack_size - 1;
+	addr = untagged_addr_remote(mm, addr);
+
+	page = get_user_page_vma_remote(mm, addr, FOLL_FORCE | FOLL_WRITE,
+					&vma);
+	if (IS_ERR(page)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (!(vma->vm_flags & VM_SHADOW_STACK)) {
+		ret = -EFAULT;
+		goto out_page;
+	}
+
+	ret = arch_shstk_validate_clone(p, vma, page, args);
+
+out_page:
+	put_page(page);
+out:
+	mmap_read_unlock(mm);
+	mmput(mm);
+	return ret;
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -2381,6 +2436,9 @@  __latent_entropy struct task_struct *copy_process(
 	if (retval)
 		goto bad_fork_cleanup_namespaces;
 	retval = copy_thread(p, args);
+	if (retval)
+		goto bad_fork_cleanup_io;
+	retval = shstk_validate_clone(p, args);
 	if (retval)
 		goto bad_fork_cleanup_io;
 
@@ -2939,7 +2997,9 @@  noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
 		     CLONE_ARGS_SIZE_VER1);
 	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
 		     CLONE_ARGS_SIZE_VER2);
-	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
+	BUILD_BUG_ON(offsetofend(struct clone_args, shadow_stack_size) !=
+		     CLONE_ARGS_SIZE_VER3);
+	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER3);
 
 	if (unlikely(usize > PAGE_SIZE))
 		return -E2BIG;
@@ -2972,16 +3032,18 @@  noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
 		return -EINVAL;
 
 	*kargs = (struct kernel_clone_args){
-		.flags		= args.flags,
-		.pidfd		= u64_to_user_ptr(args.pidfd),
-		.child_tid	= u64_to_user_ptr(args.child_tid),
-		.parent_tid	= u64_to_user_ptr(args.parent_tid),
-		.exit_signal	= args.exit_signal,
-		.stack		= args.stack,
-		.stack_size	= args.stack_size,
-		.tls		= args.tls,
-		.set_tid_size	= args.set_tid_size,
-		.cgroup		= args.cgroup,
+		.flags			= args.flags,
+		.pidfd			= u64_to_user_ptr(args.pidfd),
+		.child_tid		= u64_to_user_ptr(args.child_tid),
+		.parent_tid		= u64_to_user_ptr(args.parent_tid),
+		.exit_signal		= args.exit_signal,
+		.stack			= args.stack,
+		.stack_size		= args.stack_size,
+		.tls			= args.tls,
+		.set_tid_size		= args.set_tid_size,
+		.cgroup			= args.cgroup,
+		.shadow_stack		= args.shadow_stack,
+		.shadow_stack_size	= args.shadow_stack_size,
 	};
 
 	if (args.set_tid &&
@@ -3022,6 +3084,34 @@  static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
 	return true;
 }
 
+/**
+ * clone3_shadow_stack_valid - check and prepare shadow stack
+ * @kargs: kernel clone args
+ *
+ * Verify that shadow stacks are only enabled if supported.
+ */
+static inline bool clone3_shadow_stack_valid(struct kernel_clone_args *kargs)
+{
+	if (kargs->shadow_stack) {
+		if (!kargs->shadow_stack_size)
+			return false;
+
+		if (kargs->shadow_stack_size < SHADOW_STACK_SIZE_MIN)
+			return false;
+
+		if (kargs->shadow_stack_size > rlimit(RLIMIT_STACK))
+			return false;
+
+		/*
+		 * The architecture must check support on the specific
+		 * machine.
+		 */
+		return IS_ENABLED(CONFIG_ARCH_HAS_USER_SHADOW_STACK);
+	} else {
+		return !kargs->shadow_stack_size;
+	}
+}
+
 static bool clone3_args_valid(struct kernel_clone_args *kargs)
 {
 	/* Verify that no unknown flags are passed along. */
@@ -3044,7 +3134,7 @@  static bool clone3_args_valid(struct kernel_clone_args *kargs)
 	    kargs->exit_signal)
 		return false;
 
-	if (!clone3_stack_valid(kargs))
+	if (!clone3_stack_valid(kargs) || !clone3_shadow_stack_valid(kargs))
 		return false;
 
 	return true;