@@ -9,6 +9,7 @@
#ifdef CONFIG_SHADOW_CALL_STACK
extern void scs_init_irq(void);
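+/* Allocate per-CPU SDEI shadow stacks; returns 0 on success, -ENOMEM on failure */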
+extern int scs_init_sdei(void);
static __always_inline void scs_save(struct task_struct *tsk)
{
@@ -27,6 +28,7 @@ static inline void scs_overflow_check(struct task_struct *tsk)
#else /* CONFIG_SHADOW_CALL_STACK */
static inline void scs_init_irq(void) {}
+static inline int scs_init_sdei(void) { return 0; }
static inline void scs_save(struct task_struct *tsk) {}
static inline void scs_overflow_check(struct task_struct *tsk) {}
@@ -1050,13 +1050,16 @@ ENTRY(__sdei_asm_handler)
mov x19, x1
+#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
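+	/* The event priority is zero for normal events, non-zero for critical */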
+ ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
+#endif
+
#ifdef CONFIG_VMAP_STACK
/*
* entry.S may have been using sp as a scratch register; find whether
* this is a normal or critical event and switch to the appropriate
* stack for this CPU.
*/
- ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
cbnz w4, 1f
ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
b 2f
@@ -1066,6 +1069,15 @@ ENTRY(__sdei_asm_handler)
mov sp, x5
#endif
+#ifdef CONFIG_SHADOW_CALL_STACK
+ /* Use a separate shadow call stack for normal and critical events */
+ cbnz w4, 3f
+ ldr_this_cpu dst=x18, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
+ b 4f
+3: ldr_this_cpu dst=x18, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
+4:
+#endif
+
/*
* We may have interrupted userspace, or a guest, or exit-from or
* return-to either of these. We can't trust sp_el0; restore it.
@@ -10,31 +10,105 @@
#include <asm/pgtable.h>
#include <asm/scs.h>
-DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
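+/* Declare both the per-CPU stack-top pointer and the static backing storage */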
+#define DECLARE_SCS(name) \
+ DECLARE_PER_CPU(unsigned long *, name ## _ptr); \
+ DECLARE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)
-#ifndef CONFIG_SHADOW_CALL_STACK_VMAP
-DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], irq_shadow_call_stack)
- __aligned(SCS_SIZE);
+#ifdef CONFIG_SHADOW_CALL_STACK_VMAP
+#define DEFINE_SCS(name) \
+ DEFINE_PER_CPU(unsigned long *, name ## _ptr)
+#else
+/* Allocate a static per-CPU shadow stack */
+#define DEFINE_SCS(name) \
+ DEFINE_PER_CPU(unsigned long *, name ## _ptr); \
+ DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \
+ __aligned(SCS_SIZE)
+#endif /* CONFIG_SHADOW_CALL_STACK_VMAP */
+
+DECLARE_SCS(irq_shadow_call_stack);
+DECLARE_SCS(sdei_shadow_call_stack_normal);
+DECLARE_SCS(sdei_shadow_call_stack_critical);
+
+DEFINE_SCS(irq_shadow_call_stack);
+#ifdef CONFIG_ARM_SDE_INTERFACE
+DEFINE_SCS(sdei_shadow_call_stack_normal);
+DEFINE_SCS(sdei_shadow_call_stack_critical);
#endif
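+/*
+ * Allocate a page-sized shadow stack for @cpu from the vmalloc area,
+ * aligned to SCS_SIZE, and publish it through the per-CPU pointer.
+ */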
+static int scs_alloc_percpu(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
+ VMALLOC_START, VMALLOC_END,
+ GFP_SCS, PAGE_KERNEL,
+ 0, cpu_to_node(cpu),
+ __builtin_return_address(0));
+
+ if (!p)
+ return -ENOMEM;
+ per_cpu(*ptr, cpu) = p;
+
+ return 0;
+}
+
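+/* Free a vmalloc'ed shadow stack and clear the per-CPU pointer */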
+static void scs_free_percpu(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p = per_cpu(*ptr, cpu);
+
+ if (p) {
+ per_cpu(*ptr, cpu) = NULL;
+ vfree(p);
+ }
+}
+
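+/* Unwind the allocations of a partially completed scs_init_sdei() */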
+static void scs_free_sdei(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ scs_free_percpu(&sdei_shadow_call_stack_normal_ptr, cpu);
+ scs_free_percpu(&sdei_shadow_call_stack_critical_ptr, cpu);
+ }
+}
+
void scs_init_irq(void)
{
int cpu;
for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SHADOW_CALL_STACK_VMAP
- unsigned long *p;
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK_VMAP))
+ WARN_ON(scs_alloc_percpu(&irq_shadow_call_stack_ptr,
+ cpu));
+ else
+ per_cpu(irq_shadow_call_stack_ptr, cpu) =
+ per_cpu(irq_shadow_call_stack, cpu);
+ }
+}
- p = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
- VMALLOC_START, VMALLOC_END,
- GFP_SCS, PAGE_KERNEL,
- 0, cpu_to_node(cpu),
- __builtin_return_address(0));
+int scs_init_sdei(void)
+{
+ int cpu;
- per_cpu(irq_shadow_call_stack_ptr, cpu) = p;
-#else
- per_cpu(irq_shadow_call_stack_ptr, cpu) =
- per_cpu(irq_shadow_call_stack, cpu);
-#endif /* CONFIG_SHADOW_CALL_STACK_VMAP */
+ if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+ return 0;
+
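+	/* Each CPU needs both a normal and a critical event shadow stack */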
+ for_each_possible_cpu(cpu) {
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK_VMAP)) {
+ if (scs_alloc_percpu(
+ &sdei_shadow_call_stack_normal_ptr, cpu) ||
+ scs_alloc_percpu(
+ &sdei_shadow_call_stack_critical_ptr, cpu)) {
+ scs_free_sdei();
+ return -ENOMEM;
+ }
+ } else {
+ per_cpu(sdei_shadow_call_stack_normal_ptr, cpu) =
+ per_cpu(sdei_shadow_call_stack_normal, cpu);
+ per_cpu(sdei_shadow_call_stack_critical_ptr, cpu) =
+ per_cpu(sdei_shadow_call_stack_critical, cpu);
+ }
}
+
+ return 0;
}
@@ -13,6 +13,7 @@
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
+#include <asm/scs.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
@@ -162,6 +163,12 @@ unsigned long sdei_arch_get_entry_point(int conduit)
return 0;
}
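+	/* Without shadow stacks, SDEI cannot be used; report no entry point */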
+ if (scs_init_sdei()) {
+ if (IS_ENABLED(CONFIG_VMAP_STACK))
+ free_sdei_stacks();
+ return 0;
+ }
+
sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0