@@ -65,6 +65,7 @@ config ARM64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
+ select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -1022,6 +1023,10 @@ config ARCH_HAS_CACHE_LINE_SIZE
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y if PGTABLE_LEVELS > 2
+# Supported by clang >= 7.0
+config CC_HAVE_SHADOW_CALL_STACK
+ def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
---help---
new file mode 100644
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/scs.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+extern void scs_init_irq(void);
+
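+/*
+ * Save the current shadow call stack pointer (x18) in the task struct
+ * so it can be restored when the task is scheduled in again.
+ */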
+static __always_inline void scs_save(struct task_struct *tsk)
+{
+ void *s;
+
+ asm volatile("mov %0, x18" : "=r" (s));
+ task_set_scs(tsk, s);
+}
+
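+/*
+ * Check the task's saved shadow call stack pointer for corruption or
+ * overflow; called from __switch_to() before switching to the task.
+ */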
+static inline void scs_overflow_check(struct task_struct *tsk)
+{
+ if (unlikely(scs_corrupted(tsk)))
+ panic("corrupted shadow stack detected inside scheduler\n");
+}
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+static inline void scs_init_irq(void) {}
+static inline void scs_save(struct task_struct *tsk) {}
+static inline void scs_overflow_check(struct task_struct *tsk) {}
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
@@ -41,6 +41,9 @@ struct thread_info {
#endif
} preempt;
};
+#ifdef CONFIG_SHADOW_CALL_STACK
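+	/* x18 (shadow call stack pointer), saved while the task is not running */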
+ void *shadow_call_stack;
+#endif
};
#define thread_saved_pc(tsk) \
@@ -63,6 +63,7 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
obj-$(CONFIG_ARM64_SSBD) += ssbd.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
+obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
@@ -33,6 +33,9 @@ int main(void)
DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
+#endif
+#ifdef CONFIG_SHADOW_CALL_STACK
+ DEFINE(TSK_TI_SCS, offsetof(struct task_struct, thread_info.shadow_call_stack));
#endif
DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
#ifdef CONFIG_STACKPROTECTOR
@@ -177,6 +177,10 @@ alternative_cb_end
apply_ssbd 1, x22, x23
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldr x18, [tsk, #TSK_TI_SCS] // Restore shadow call stack
+ str xzr, [tsk, #TSK_TI_SCS] // Limit visibility of saved SCS
+#endif
.else
add x21, sp, #S_FRAME_SIZE
get_current_task tsk
@@ -278,6 +282,12 @@ alternative_else_nop_endif
ct_user_enter
.endif
+#ifdef CONFIG_SHADOW_CALL_STACK
+ .if \el == 0
+ str x18, [tsk, #TSK_TI_SCS] // Save shadow call stack
+ .endif
+#endif
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
@@ -383,6 +393,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
.macro irq_stack_entry
mov x19, sp // preserve the original sp
+#ifdef CONFIG_SHADOW_CALL_STACK
+ mov x24, x18 // preserve the original shadow stack
+#endif
/*
* Compare sp with the base of the task stack.
@@ -400,15 +413,25 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
/* switch to the irq stack */
mov sp, x26
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+ /* also switch to the irq shadow stack */
+ ldr_this_cpu x18, irq_shadow_call_stack_ptr, x26
+#endif
+
9998:
.endm
/*
- * x19 should be preserved between irq_stack_entry and
- * irq_stack_exit.
+ * The callee-saved regs (x19-x29) should be preserved between
+ * irq_stack_entry and irq_stack_exit, but note that kernel_entry
+ * uses x20-x23 to store data for later use.
*/
.macro irq_stack_exit
mov sp, x19
+#ifdef CONFIG_SHADOW_CALL_STACK
+ mov x18, x24
+#endif
.endm
/* GPRs used by entry code */
@@ -895,6 +918,11 @@ ENTRY(cpu_switch_to)
ldr lr, [x8]
mov sp, x9
msr sp_el0, x1
+#ifdef CONFIG_SHADOW_CALL_STACK
+ str x18, [x0, #TSK_TI_SCS]
+ ldr x18, [x1, #TSK_TI_SCS]
+ str xzr, [x1, #TSK_TI_SCS] // limit visibility of saved SCS
+#endif
ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -27,6 +27,7 @@
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
@@ -424,6 +425,10 @@ __primary_switched:
stp xzr, x30, [sp, #-16]!
mov x29, sp
+#ifdef CONFIG_SHADOW_CALL_STACK
+ adr_l x18, init_shadow_call_stack // Set shadow call stack
+#endif
+
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
@@ -731,6 +736,10 @@ __secondary_switched:
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
msr sp_el0, x2
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldr x18, [x2, #TSK_TI_SCS] // set shadow call stack
+ str xzr, [x2, #TSK_TI_SCS] // limit visibility of saved SCS
+#endif
mov x29, #0
mov x30, #0
b secondary_start_kernel
@@ -21,6 +21,7 @@
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
#include <asm/vmap_stack.h>
+#include <asm/scs.h>
unsigned long irq_err_count;
@@ -63,6 +64,7 @@ static void init_irq_stacks(void)
void __init init_IRQ(void)
{
init_irq_stacks();
+ scs_init_irq();
irqchip_init();
if (!handle_arch_irq)
panic("No interrupt controller found.");
@@ -52,6 +52,7 @@
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
+#include <asm/scs.h>
#include <asm/stacktrace.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
@@ -514,6 +515,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
uao_thread_switch(next);
ptrauth_thread_switch(next);
ssbs_thread_switch(next);
+ scs_overflow_check(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
new file mode 100644
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#include <linux/percpu.h>
+#include <linux/vmalloc.h>
+#include <asm/pgtable.h>
+#include <asm/scs.h>
+
+DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+
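+/*
+ * Without CONFIG_SHADOW_CALL_STACK_VMAP, the IRQ shadow call stacks are
+ * allocated statically per CPU and aligned to SCS_SIZE.
+ */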
+#ifndef CONFIG_SHADOW_CALL_STACK_VMAP
+DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], irq_shadow_call_stack)
+ __aligned(SCS_SIZE);
+#endif
+
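+/*
+ * Set up a shadow call stack for IRQ handling on each possible CPU,
+ * either allocated from vmalloc (CONFIG_SHADOW_CALL_STACK_VMAP) or
+ * taken from the static per-CPU arrays above.
+ */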
+void scs_init_irq(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+#ifdef CONFIG_SHADOW_CALL_STACK_VMAP
+ unsigned long *p;
+
+ p = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
+ VMALLOC_START, VMALLOC_END,
+ GFP_SCS, PAGE_KERNEL,
+ 0, cpu_to_node(cpu),
+ __builtin_return_address(0));
+
+ per_cpu(irq_shadow_call_stack_ptr, cpu) = p;
+#else
+ per_cpu(irq_shadow_call_stack_ptr, cpu) =
+ per_cpu(irq_shadow_call_stack, cpu);
+#endif /* CONFIG_SHADOW_CALL_STACK_VMAP */
+ }
+}
@@ -46,6 +46,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
+#include <asm/scs.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -358,6 +359,9 @@ void cpu_die(void)
{
unsigned int cpu = smp_processor_id();
+ /* Save the shadow stack pointer before exiting the idle task */
+ scs_save(current);
+
idle_task_exit();
local_daif_mask();