@@ -410,6 +410,18 @@ config CC_HAS_SANE_STACKPROTECTOR
the compiler produces broken code or if it does not let us control
the segment on 32-bit kernels.
+config CC_HAS_CUSTOMIZED_STACKPROTECTOR
+ bool
+ # Although clang supports the -mstack-protector-guard-reg option, it
+ # generates a GOT reference for __stack_chk_guard even with the
+ # -fno-PIE flag.
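+ # A rough sketch of the difference: gcc addresses the guard directly
+ # as %gs:__stack_chk_guard, whereas clang first loads its address from
+ # __stack_chk_guard@GOTPCREL(%rip), which does not work for a
+ # %gs-relative percpu variable.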
+ default y if (!CC_IS_CLANG && $(cc-option,-mstack-protector-guard-reg=gs))
+
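+# Fall back to the historical fixed layout, with the canary at %gs:40 in
+# struct fixed_percpu_data, when the compiler cannot redirect the guard
+# symbol.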
+config STACKPROTECTOR_FIXED
+ bool
+ depends on X86_64 && STACKPROTECTOR
+ default !CC_HAS_CUSTOMIZED_STACKPROTECTOR
+
menu "Processor type and features"
config SMP
@@ -111,13 +111,7 @@ ifeq ($(CONFIG_X86_32),y)
# temporary until string.h is fixed
KBUILD_CFLAGS += -ffreestanding
- ifeq ($(CONFIG_STACKPROTECTOR),y)
- ifeq ($(CONFIG_SMP),y)
- KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
- else
- KBUILD_CFLAGS += -mstack-protector-guard=global
- endif
- endif
+ percpu_seg := fs
else
BITS := 64
UTS_MACHINE := x86_64
@@ -167,6 +161,19 @@ else
KBUILD_CFLAGS += -mcmodel=kernel
KBUILD_RUSTFLAGS += -Cno-redzone=y
KBUILD_RUSTFLAGS += -Ccode-model=kernel
+
+ percpu_seg := gs
+endif
+
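+# When the compiler can redirect the guard (no STACKPROTECTOR_FIXED), point
+# it at the per-cpu __stack_chk_guard in the %fs/%gs segment; UP needs only
+# a global guard. With STACKPROTECTOR_FIXED the compiler default (%gs:40)
+# is used and no extra flags are needed.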
+ifeq ($(CONFIG_STACKPROTECTOR),y)
+ ifneq ($(CONFIG_STACKPROTECTOR_FIXED),y)
+ ifeq ($(CONFIG_SMP),y)
+ KBUILD_CFLAGS += -mstack-protector-guard-reg=$(percpu_seg) \
+ -mstack-protector-guard-symbol=__stack_chk_guard
+ else
+ KBUILD_CFLAGS += -mstack-protector-guard=global
+ endif
+ endif
endif
#
@@ -229,6 +229,10 @@ SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
int3
SYM_CODE_END(entry_SYSCALL_64)
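+/*
+ * With the fixed layout there is no __stack_chk_guard percpu variable;
+ * alias it to the canary slot in fixed_percpu_data so that the
+ * PER_CPU_VAR(__stack_chk_guard) write in __switch_to_asm works for both
+ * layouts.
+ */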
+#ifdef CONFIG_STACKPROTECTOR_FIXED
+#define __stack_chk_guard fixed_percpu_data + FIXED_stack_canary
+#endif
+
/*
* %rdi: prev task
* %rsi: next task
@@ -252,7 +256,7 @@ SYM_FUNC_START(__switch_to_asm)
#ifdef CONFIG_STACKPROTECTOR
movq TASK_stack_canary(%rsi), %rbx
- movq %rbx, PER_CPU_VAR(fixed_percpu_data) + FIXED_stack_canary
+ movq %rbx, PER_CPU_VAR(__stack_chk_guard)
#endif
/*
@@ -379,6 +379,8 @@ struct irq_stack {
} __aligned(IRQ_STACK_SIZE);
#ifdef CONFIG_X86_64
+
+#ifdef CONFIG_STACKPROTECTOR_FIXED
struct fixed_percpu_data {
/*
* GCC hardcodes the stack canary as %gs:40. Since the
@@ -394,21 +396,26 @@ struct fixed_percpu_data {
DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);
+#endif /* CONFIG_STACKPROTECTOR_FIXED */
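+
+/*
+ * Without the fixed layout the kernel GS base is just the percpu offset of
+ * the CPU; on UP percpu variables are ordinary variables and the base is 0.
+ */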
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
+#ifdef CONFIG_STACKPROTECTOR_FIXED
return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
+#elif defined(CONFIG_SMP)
+ return per_cpu_offset(cpu);
+#else
+ return 0;
+#endif
}
extern asmlinkage void ignore_sysret(void);
/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
-#else /* X86_64 */
-#ifdef CONFIG_STACKPROTECTOR
-DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
-#endif
-#endif /* !X86_64 */
+#endif /* CONFIG_X86_64 */
struct perf_event;
@@ -36,6 +36,12 @@
#include <linux/sched.h>
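+
+/*
+ * The canary is either read from a fixed %gs offset (struct
+ * fixed_percpu_data) or through the __stack_chk_guard percpu variable;
+ * the alias below lets the code here use one name for both cases.
+ */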
+#ifdef CONFIG_STACKPROTECTOR_FIXED
+#define __stack_chk_guard fixed_percpu_data.stack_canary
+#else
+DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
+#endif
+
/*
* Initialize the stackprotector canary value.
*
@@ -51,25 +57,17 @@ static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary = get_random_canary();
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_STACKPROTECTOR_FIXED
BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
#endif
current->stack_canary = canary;
-#ifdef CONFIG_X86_64
- this_cpu_write(fixed_percpu_data.stack_canary, canary);
-#else
this_cpu_write(__stack_chk_guard, canary);
-#endif
}
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{
-#ifdef CONFIG_X86_64
- per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
-#else
per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
-#endif
}
#else /* STACKPROTECTOR */
@@ -56,7 +56,7 @@ int main(void)
BLANK();
-#ifdef CONFIG_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR_FIXED
OFFSET(FIXED_stack_canary, fixed_percpu_data, stack_canary);
BLANK();
#endif
@@ -2051,10 +2051,6 @@ DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
EXPORT_PER_CPU_SYMBOL(pcpu_hot);
#ifdef CONFIG_X86_64
-DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
- fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
-EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
-
static void wrmsrl_cstar(unsigned long val)
{
/*
@@ -2102,15 +2098,18 @@ void syscall_init(void)
X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
X86_EFLAGS_AC|X86_EFLAGS_ID);
}
-
-#else /* CONFIG_X86_64 */
+#endif /* CONFIG_X86_64 */
#ifdef CONFIG_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR_FIXED
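+/*
+ * The fixed layout requires fixed_percpu_data to sit at the start of the
+ * percpu area so that the canary lands at %gs:40; the linker script
+ * asserts this on SMP.
+ */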
+DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
+ fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
+EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
+#else
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
-
-#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_STACKPROTECTOR */
/*
* Clear all 6 debug registers:
@@ -68,7 +68,13 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Setup GSBASE to allow stack canary access for C code */
movl $MSR_GS_BASE, %ecx
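+ /*
+ * Only the fixed layout needs fixed_percpu_data here; otherwise the
+ * boot CPU uses the init percpu area (SMP) or a zero base (UP).
+ */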
+#if defined(CONFIG_STACKPROTECTOR_FIXED)
leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
+#elif defined(CONFIG_SMP)
+ movabs $__per_cpu_load, %rdx
+#else
+ xorl %edx, %edx
+#endif
movl %edx, %eax
shrq $32, %rdx
wrmsr
@@ -283,16 +289,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
movl %eax,%fs
movl %eax,%gs
- /* Set up %gs.
- *
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
+ /*
+ * Set up GS base.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
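+ /*
+ * On SMP, and on UP without the fixed layout, %rdx should already hold
+ * the percpu base (or zero) from the setup earlier in this path; only
+ * UP with the fixed layout needs an explicit load here.
+ */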
-#ifndef CONFIG_SMP
- leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
+#if !defined(CONFIG_SMP) && defined(CONFIG_STACKPROTECTOR_FIXED)
+ leaq __per_cpu_load(%rip), %rdx
#endif
movl %edx, %eax
shrq $32, %rdx
@@ -500,12 +500,14 @@ SECTIONS
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
+#ifdef CONFIG_STACKPROTECTOR_FIXED
+INIT_PER_CPU(fixed_percpu_data);
#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
"fixed_percpu_data is not at start of per-cpu area");
#endif
+#endif
#endif /* CONFIG_X86_64 */
@@ -96,8 +96,16 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
1:
/* Set base address in stack canary descriptor. */
mov $MSR_GS_BASE,%ecx
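+ /* Base the canary segment at fixed_percpu_data (physical) for the
+ * fixed layout, at the init percpu area on SMP, or at zero on UP. */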
+#if defined(CONFIG_STACKPROTECTOR_FIXED)
mov $_pa(INIT_PER_CPU_VAR(fixed_percpu_data)), %eax
xor %edx, %edx
+#elif defined(CONFIG_SMP)
+ mov $__per_cpu_load, %rax
+ cdq
+#else
+ xor %eax, %eax
+ xor %edx, %edx
+#endif
wrmsr
call xen_prepare_pvh
@@ -51,15 +51,19 @@ SYM_CODE_START(startup_xen)
leaq (__end_init_task - PTREGS_SIZE)(%rip), %rsp
- /* Set up %gs.
- *
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
+ /*
+ * Set up GS base.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
- movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax
+#if defined(CONFIG_STACKPROTECTOR_FIXED)
+ leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rax
+#elif defined(CONFIG_SMP)
+ movabs $__per_cpu_load, %rax
+#else
+ xorl %eax, %eax
+#endif
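+ /*
+ * cdq sign-extends %eax into %edx: zero in the UP case, and the
+ * correct upper half for symbols in the kernel's top 2GB otherwise.
+ */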
cdq
wrmsr