Message ID | 20200324203231.64324-4-keescook@chromium.org (mailing list archive)
---|---
State | New, archived
Series | Optionally randomize kernel stack offset each syscall
On Tue, Mar 24, 2020 at 01:32:29PM -0700, Kees Cook wrote:
> +/*
> + * Do not use this anywhere else in the kernel. This is used here because
> + * it provides an arch-agnostic way to grow the stack with correct
> + * alignment. Also, since this use is being explicitly masked to a max of
> + * 10 bits, stack-clash style attacks are unlikely. For more details see
> + * "VLAs" in Documentation/process/deprecated.rst
> + */
> +void *__builtin_alloca(size_t size);
> +
> +#define add_random_kstack_offset() do {					\
> +	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
> +				&randomize_kstack_offset)) {		\
> +		u32 offset = this_cpu_read(kstack_offset);		\
> +		char *ptr = __builtin_alloca(offset & 0x3FF);		\
> +		asm volatile("" : "=m"(*ptr));				\

Is this asm() a homebrew OPTIMIZER_HIDE_VAR(*ptr)? If the asm
constraints generate better code, could we add those as alternative
constraints in OPTIMIZER_HIDE_VAR()?

Mark.
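For reference, the OPTIMIZER_HIDE_VAR() helper being referred to lives in
include/linux/compiler.h; its rough shape is shown below (paraphrased, and
the exact definition and compiler-specific fallbacks may differ between
kernel versions):

/* Approximate shape of OPTIMIZER_HIDE_VAR() from include/linux/compiler.h;
 * check the tree for the exact definition. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))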
On Mon, Mar 30, 2020 at 12:25:36PM +0100, Mark Rutland wrote:
> On Tue, Mar 24, 2020 at 01:32:29PM -0700, Kees Cook wrote:
> > +/*
> > + * Do not use this anywhere else in the kernel. This is used here because
> > + * it provides an arch-agnostic way to grow the stack with correct
> > + * alignment. Also, since this use is being explicitly masked to a max of
> > + * 10 bits, stack-clash style attacks are unlikely. For more details see
> > + * "VLAs" in Documentation/process/deprecated.rst
> > + */
> > +void *__builtin_alloca(size_t size);
> > +
> > +#define add_random_kstack_offset() do {					\
> > +	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
> > +				&randomize_kstack_offset)) {		\
> > +		u32 offset = this_cpu_read(kstack_offset);		\
> > +		char *ptr = __builtin_alloca(offset & 0x3FF);		\
> > +		asm volatile("" : "=m"(*ptr));				\
>
> Is this asm() a homebrew OPTIMIZER_HIDE_VAR(*ptr)? If the asm
> constraints generate better code, could we add those as alternative
> constraints in OPTIMIZER_HIDE_VAR()?

Hah, yes, it is. And this produces identical asm, so I've replaced it
with OPTIMIZER_HIDE_VAR() now.

Now if I could figure out how to hide it from stack protector. :(
On Mon, Mar 30, 2020 at 12:25:36PM +0100, Mark Rutland wrote:
> On Tue, Mar 24, 2020 at 01:32:29PM -0700, Kees Cook wrote:
> > +/*
> > + * Do not use this anywhere else in the kernel. This is used here because
> > + * it provides an arch-agnostic way to grow the stack with correct
> > + * alignment. Also, since this use is being explicitly masked to a max of
> > + * 10 bits, stack-clash style attacks are unlikely. For more details see
> > + * "VLAs" in Documentation/process/deprecated.rst
> > + */
> > +void *__builtin_alloca(size_t size);
> > +
> > +#define add_random_kstack_offset() do {					\
> > +	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
> > +				&randomize_kstack_offset)) {		\
> > +		u32 offset = this_cpu_read(kstack_offset);		\
> > +		char *ptr = __builtin_alloca(offset & 0x3FF);		\
> > +		asm volatile("" : "=m"(*ptr));				\
>
> Is this asm() a homebrew OPTIMIZER_HIDE_VAR(*ptr)? If the asm
> constraints generate better code, could we add those as alternative
> constraints in OPTIMIZER_HIDE_VAR()?

Er, no, sorry, not the same. I disassembled the wrong binary. :)

With asm volatile("" : "=m"(*ptr)):

ffffffff810038bc:	48 8d 44 24 0f		lea    0xf(%rsp),%rax
ffffffff810038c1:	48 83 e0 f0		and    $0xfffffffffffffff0,%rax

With __asm__ ("" : "=r" (var) : "0" (var)):

ffffffff810038bc:	48 8d 54 24 0f		lea    0xf(%rsp),%rdx
ffffffff810038c1:	48 83 e2 f0		and    $0xfffffffffffffff0,%rdx
ffffffff810038c5:	0f b6 02		movzbl (%rdx),%eax
ffffffff810038c8:	88 02			mov    %al,(%rdx)

It looks like OPTIMIZER_HIDE_VAR() is basically just:

	var = var;

In the former case, we avoid the write and retain the allocation. So I
don't think OPTIMIZER_HIDE_VAR() should be used here, nor should
OPTIMIZER_HIDE_VAR() be changed to remove the "0" (var) bit.
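The codegen difference above can be reproduced with a small userspace
sketch (an illustration only, not kernel code; the function names below are
made up), compiled with something like "gcc -O2 -S":

#include <alloca.h>
#include <stddef.h>

/* Output-only memory constraint: the alloca() allocation stays live,
 * but no load or store of *ptr is emitted. */
void eat_stack_asm_m(size_t len)
{
	char *ptr = alloca(len & 0x3FF);

	asm volatile("" : "=m"(*ptr));
}

/* OPTIMIZER_HIDE_VAR()-style read/write register constraint: the
 * allocation also stays live, but a byte load and store are emitted. */
void eat_stack_hide_var(size_t len)
{
	char *ptr = alloca(len & 0x3FF);

	__asm__ ("" : "=r" (*ptr) : "0" (*ptr));
}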
diff --git a/Makefile b/Makefile
index 171f2b004c8a..c99463406522 100644
--- a/Makefile
+++ b/Makefile
@@ -779,6 +779,10 @@ ifdef CONFIG_INIT_STACK_ALL
 KBUILD_CFLAGS	+= -ftrivial-auto-var-init=pattern
 endif
 
+# While VLAs have been removed, GCC produces unreachable stack probes
+# for the random_kstack_offset feature. Disable it for all compilers.
+KBUILD_CFLAGS	+= $(call cc-option,-fno-stack-clash-protection,)
+
 DEBUG_CFLAGS	:= $(call cc-option, -fno-var-tracking-assignments)
 
 ifdef CONFIG_DEBUG_INFO
diff --git a/arch/Kconfig b/arch/Kconfig
index 17fe351cdde0..619a56da4b76 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -854,6 +854,25 @@ config VMAP_STACK
 	  virtual mappings with real shadow memory, and KASAN_VMALLOC must
 	  be enabled.
 
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+	def_bool n
+	help
+	  An arch should select this symbol if it can support kernel stack
+	  offset randomization with calls to add_random_kstack_offset()
+	  during syscall entry and choose_random_kstack_offset() during
+	  syscall exit.
+
+config RANDOMIZE_KSTACK_OFFSET_DEFAULT
+	bool "Randomize kernel stack offset on syscall entry"
+	depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+	help
+	  The kernel stack offset can be randomized (after pt_regs) by
+	  roughly 5 bits of entropy, frustrating memory corruption
+	  attacks that depend on stack address determinism or
+	  cross-syscall address exposures. This feature is controlled
+	  by kernel boot param "randomize_kstack_offset=on/off", and this
+	  config chooses the default boot state.
+
 config ARCH_OPTIONAL_KERNEL_RWX
 	def_bool n
 
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
new file mode 100644
index 000000000000..651ba9504568
--- /dev/null
+++ b/include/linux/randomize_kstack.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RANDOMIZE_KSTACK_H
+#define _LINUX_RANDOMIZE_KSTACK_H
+
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/percpu-defs.h>
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+			 randomize_kstack_offset);
+DECLARE_PER_CPU(u32, kstack_offset);
+
+/*
+ * Do not use this anywhere else in the kernel. This is used here because
+ * it provides an arch-agnostic way to grow the stack with correct
+ * alignment. Also, since this use is being explicitly masked to a max of
+ * 10 bits, stack-clash style attacks are unlikely. For more details see
+ * "VLAs" in Documentation/process/deprecated.rst
+ */
+void *__builtin_alloca(size_t size);
+
+#define add_random_kstack_offset() do {					\
+	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
+				&randomize_kstack_offset)) {		\
+		u32 offset = this_cpu_read(kstack_offset);		\
+		char *ptr = __builtin_alloca(offset & 0x3FF);		\
+		asm volatile("" : "=m"(*ptr));				\
+	}								\
+} while (0)
+
+#define choose_random_kstack_offset(rand) do {				\
+	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
+				&randomize_kstack_offset)) {		\
+		u32 offset = this_cpu_read(kstack_offset);		\
+		offset ^= (rand);					\
+		this_cpu_write(kstack_offset, offset);			\
+	}								\
+} while (0)
+
+#endif
diff --git a/init/main.c b/init/main.c
index ee4947af823f..78fe3aea00b0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -777,6 +777,29 @@ static void __init mm_init(void)
 	pti_init();
 }
 
+#ifdef CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+			   randomize_kstack_offset);
+DEFINE_PER_CPU(u32, kstack_offset);
+
+static int __init early_randomize_kstack_offset(char *buf)
+{
+	int ret;
+	bool bool_result;
+
+	ret = kstrtobool(buf, &bool_result);
+	if (ret)
+		return ret;
+
+	if (bool_result)
+		static_branch_enable(&randomize_kstack_offset);
+	else
+		static_branch_disable(&randomize_kstack_offset);
+	return 0;
+}
+early_param("randomize_kstack_offset", early_randomize_kstack_offset);
+#endif
+
 void __init __weak arch_call_rest_init(void)
 {
 	rest_init();
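For a sense of how the two hooks in this patch are meant to be wired up by
an architecture, a hypothetical usage sketch follows; the dispatch and
entropy-source names here are illustrative assumptions, not part of this
series or any real arch port:

#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/randomize_kstack.h>

/* Placeholders for this sketch only; not real kernel interfaces. */
extern void invoke_syscall(struct pt_regs *regs, unsigned long nr);
extern u32 arch_cheap_entropy(void);

static void do_syscall_dispatch(struct pt_regs *regs, unsigned long nr)
{
	/* Grow the stack by this syscall's randomized offset before any
	 * syscall-handling frames are created. */
	add_random_kstack_offset();

	invoke_syscall(regs, nr);

	/* Mix in fresh entropy so the next syscall on this CPU sees a
	 * different offset; a cycle counter is one cheap source. */
	choose_random_kstack_offset(arch_cheap_entropy() & 0x3FF);
}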