Message ID | 20210316151054.5405-29-yu-cheng.yu@intel.com (mailing list archive)
---|---
State | New, archived
Series | Control-flow Enforcement: Shadow Stack
On Tue, Mar 16, 2021 at 08:10:54AM -0700, Yu-cheng Yu wrote:
> There are three possible options to create a shadow stack allocation API:
> an arch_prctl, a new syscall, or adding PROT_SHSTK to mmap()/mprotect().
> Each has its advantages and compromises.
>
> An arch_prctl() is the least intrusive. However, the existing x86
> arch_prctl() takes only two parameters. Multiple parameters must be
> passed in a memory buffer. There is a proposal to pass more parameters in
> registers [1], but no active discussion on that.
>
> A new syscall minimizes compatibility issues and offers an extensible
> framework to other architectures, but this will likely result in some
> overlap with mmap()/mprotect().
>
> The introduction of PROT_SHSTK to mmap()/mprotect() takes advantage of
> existing APIs. The x86-specific PROT_SHSTK is translated to VM_SHSTK and
> a shadow stack mapping is created without reinventing the wheel. There are
> potential pitfalls though. The most obvious one would be using this as a
> bypass of shadow stack protection. However, the attacker would have to get
> to the syscall first.
>
> [1] https://lore.kernel.org/lkml/20200828121624.108243-1-hjl.tools@gmail.com/
>
> Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
> Reviewed-by: Kees Cook <keescook@chromium.org>
> ---
>  arch/x86/include/asm/mman.h      | 57 +++++++++++++++++++++++++++++++-
>  arch/x86/include/uapi/asm/mman.h |  1 +
>  include/linux/mm.h               |  1 +
>  mm/mmap.c                        |  8 ++++-
>  4 files changed, 65 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
> index 629f6c81263a..bd94e30b5d34 100644
> --- a/arch/x86/include/asm/mman.h
> +++ b/arch/x86/include/asm/mman.h
> @@ -20,11 +20,66 @@
>  		((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) |	\
>  		((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
>
> -#define arch_calc_vm_prot_bits(prot, key) (		\
> +#define pkey_vm_prot_bits(prot, key) (			\
>  		((key) & 0x1 ? VM_PKEY_BIT0 : 0) |	\
>  		((key) & 0x2 ? VM_PKEY_BIT1 : 0) |	\
>  		((key) & 0x4 ? VM_PKEY_BIT2 : 0) |	\
>  		((key) & 0x8 ? VM_PKEY_BIT3 : 0))
> +#else
> +#define pkey_vm_prot_bits(prot, key) (0)
> +#endif
> +
> +static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
> +						   unsigned long pkey)
> +{
> +	unsigned long vm_prot_bits = pkey_vm_prot_bits(prot, pkey);
> +
> +	if (!(prot & PROT_WRITE) && (prot & PROT_SHSTK))
> +		vm_prot_bits |= VM_SHSTK;
> +
> +	return vm_prot_bits;
> +}
> +
> +#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
> +
> +#ifdef CONFIG_X86_CET
> +static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
> +{
> +	unsigned long valid = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;
> +
> +	if (prot & ~(valid | PROT_SHSTK))

Why is PROT_SHSTK not part of 'valid'?

> +		return false;
> +
> +	if (prot & PROT_SHSTK) {
> +		struct vm_area_struct *vma;
> +
> +		if (!current->thread.cet.shstk_size)
> +			return false;
> +
> +		/*
> +		 * A shadow stack mapping is indirectly writable by only
> +		 * the CALL and WRUSS instructions, but not other write
> +		 * instructions.  PROT_SHSTK and PROT_WRITE are mutually
> +		 * exclusive.
> +		 */
> +		if (prot & PROT_WRITE)
> +			return false;
> +
> +		vma = find_vma(current->mm, addr);
> +		if (!vma)
> +			return false;

NAK. This is racy: arch_validate_prot() is called outside of mmap_lock, so
the vma may be freed or modified under us.

do_mprotect_pkey() already calls find_vma() with the right locking. Maybe
restructure do_mprotect_pkey() to call arch_validate_prot() after
find_vma() and pass down the vma?
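As a rough illustration of that suggestion, the PROT_SHSTK checks could move
into a vma-aware helper that do_mprotect_pkey() calls after its own,
properly locked find_vma(). The arch_validate_prot_vma() name and the call
site shown in the comment are assumptions made for this sketch; they are not
part of the posted series.

/*
 * Sketch only: a hypothetical vma-aware variant of the PROT_SHSTK checks.
 * It relies on the caller having already looked up the vma under
 * mmap_lock, so no second, unlocked find_vma() is needed.
 */
static inline bool arch_validate_prot_vma(struct vm_area_struct *vma,
					  unsigned long prot)
{
	if (!(prot & PROT_SHSTK))
		return true;

	/* The task must have shadow stack enabled at all. */
	if (!current->thread.cet.shstk_size)
		return false;

	/* PROT_SHSTK and PROT_WRITE stay mutually exclusive. */
	if (prot & PROT_WRITE)
		return false;

	/* Shadow stack cannot be backed by a file or shared. */
	if (vma->vm_file || (vma->vm_flags & VM_SHARED))
		return false;

	return true;
}

/*
 * In do_mprotect_pkey(), after the existing find_vma() done under
 * mmap_lock, the check could then be (illustrative):
 *
 *	if (!arch_validate_prot_vma(vma, prot)) {
 *		error = -EINVAL;
 *		goto out;
 *	}
 */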
> +
> +		/*
> +		 * Shadow stack cannot be backed by a file or shared.
> +		 */
> +		if (vma->vm_file || (vma->vm_flags & VM_SHARED))
> +			return false;
> +	}
> +
> +	return true;
> +}
> +
> +#define arch_validate_prot arch_validate_prot
>  #endif
>
>  #endif /* _ASM_X86_MMAN_H */
> diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
> index 3ce1923e6ed9..39bb7db344a6 100644
> --- a/arch/x86/include/uapi/asm/mman.h
> +++ b/arch/x86/include/uapi/asm/mman.h
> @@ -4,6 +4,7 @@
>
>  #define MAP_32BIT	0x40		/* only give out 32bit addresses */
>
> +#define PROT_SHSTK	0x10		/* shadow stack pages */
>
>  #include <asm-generic/mman.h>
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index e178be052419..40c4b0fe7cc4 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -342,6 +342,7 @@ extern unsigned int kobjsize(const void *objp);
>
>  #if defined(CONFIG_X86)
>  # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
> +# define VM_ARCH_CLEAR	VM_SHSTK
>  #elif defined(CONFIG_PPC)
>  # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
>  #elif defined(CONFIG_PARISC)
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 99077171010b..934cb3cbe952 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1481,6 +1481,12 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
>  		struct inode *inode = file_inode(file);
>  		unsigned long flags_mask;
>
> +		/*
> +		 * Call stack cannot be backed by a file.
> +		 */
> +		if (vm_flags & VM_SHSTK)
> +			return -EINVAL;
> +
>  		if (!file_mmap_ok(file, inode, pgoff, len))
>  			return -EOVERFLOW;
>
> @@ -1545,7 +1551,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
>  	} else {
>  		switch (flags & MAP_TYPE) {
>  		case MAP_SHARED:
> -			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
> +			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP|VM_SHSTK))
>  				return -EINVAL;
>  			/*
>  			 * Ignore pgoff.
> --
> 2.21.0
>
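For context on the API the commit message proposes, a userspace caller would
request an anonymous shadow-stack mapping roughly as sketched below. The
PROT_SHSTK value comes from the patch's uapi change; the surrounding program
(anonymous private mapping, page count, error handling) is an illustrative
assumption, not taken from the series.

#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_SHSTK
#define PROT_SHSTK 0x10		/* x86-specific, from the proposed uapi header */
#endif

int main(void)
{
	size_t len = 4 * 4096;
	void *ss;

	/*
	 * Anonymous and private: the patch rejects file-backed and
	 * MAP_SHARED shadow stacks.  PROT_WRITE is omitted because it is
	 * mutually exclusive with PROT_SHSTK; the mapping is writable only
	 * through CALL and WRUSS.
	 */
	ss = mmap(NULL, len, PROT_READ | PROT_SHSTK,
		  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ss == MAP_FAILED) {
		perror("mmap(PROT_SHSTK)");
		return 1;
	}

	printf("shadow stack mapped at %p\n", ss);
	munmap(ss, len);
	return 0;
}

On the kernel side of this call, arch_calc_vm_prot_bits() translates
PROT_SHSTK into VM_SHSTK, and do_mmap() rejects the flag for file-backed or
MAP_SHARED mappings.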