
[v3,2/3] kasan, arm64: implement stack_trace_save_shadow

Message ID 78cd352296ceb14da1d0136ff7d0a6818e594ab7.1649877511.git.andreyknvl@google.com (mailing list archive)
State New
Series kasan, arm64, scs: collect stack traces from Shadow Call Stack

Commit Message

andrey.konovalov@linux.dev April 13, 2022, 7:26 p.m. UTC
From: Andrey Konovalov <andreyknvl@google.com>

Implement stack_trace_save_shadow(), which collects stack traces on
arm64 by copying frames from the Shadow Call Stack (SCS).

The implementation is best-effort and thus has limitations.

stack_trace_save_shadow() fully handles task and softirq contexts, which
are both processed on the per-task SCS.

For hardirqs, the support is limited: stack_trace_save_shadow() does not
collect the task part of the stack trace. For KASAN, this is not a problem,
as stack depot only saves the interrupt part of the stack anyway.

Otherwise, stack_trace_save_shadow() also takes a best-effort approach
with a focus on performance. Thus, it:

- Does not try to collect stack traces from other exceptions like SDEI.
- Does not try to recover frames modified by KRETPROBES or by FTRACE.

However, stack_trace_save_shadow() does strip PTR_AUTH tags to avoid
leaking them in stack traces.

The -ENOSYS return value is deliberately used to match
stack_trace_save_tsk_reliable().

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/common.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
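
For illustration, here is a sketch of how a caller might use the new
helper, falling back to the generic unwinder when SCS is not
configured. The caller name and the fallback policy here are
hypothetical; the actual wiring into kasan_save_stack() is presumably
done elsewhere in the series:

static unsigned int save_stack_entries(unsigned long *entries,
				       unsigned int nr_entries)
{
	int len;

	/* Try the fast SCS-based collection first. */
	len = stack_trace_save_shadow(entries, nr_entries);
	if (len >= 0)
		return len;

	/*
	 * -ENOSYS: CONFIG_SHADOW_CALL_STACK is disabled; fall back to
	 * the generic frame-pointer-based unwinder.
	 */
	return stack_trace_save(entries, nr_entries, 0);
}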

Comments

Mark Rutland April 14, 2022, 12:46 p.m. UTC | #1
On Wed, Apr 13, 2022 at 09:26:45PM +0200, andrey.konovalov@linux.dev wrote:
> From: Andrey Konovalov <andreyknvl@google.com>
> 
> Implement stack_trace_save_shadow(), which collects stack traces on
> arm64 by copying frames from the Shadow Call Stack (SCS).
> 
> The implementation is best-effort and thus has limitations.
> 
> stack_trace_save_shadow() fully handles task and softirq contexts, which
> are both processed on the per-task SCS.
> 
> For hardirqs, the support is limited: stack_trace_save_shadow() does not
> collect the task part of the stack trace. For KASAN, this is not a problem,
> as stack depot only saves the interrupt part of the stack anyway.
> 
> Otherwise, stack_trace_save_shadow() also takes a best-effort approach
> with a focus on performance. Thus, it:
> 
> - Does not try to collect stack traces from other exceptions like SDEI.
> - Does not try to recover frames modified by KRETPROBES or by FTRACE.
> 
> However, stack_trace_save_shadow() does strip PTR_AUTH tags to avoid
> leaking them in stack traces.
> 
> The -ENOSYS return value is deliberately used to match
> stack_trace_save_tsk_reliable().
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---
>  mm/kasan/common.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 62 insertions(+)

As things stand, NAK to this patch, for the reasons I have laid out in
my replies to earlier postings and to my reply to the cover letter of
this posting.

To be clear, that NAK applies regardless of where this is placed within
the kernel tree. If we *really* need to have a special unwinder, that
should live under arch/arm64/, but my first objection is that it is not
necessary.

I am more than happy to extend the existing unwinder with some options
to minimize overhead (e.g. to stop dumping at an exception boundary),
since that sounds useful to you, and I know it is relatively simple to
implement.

Thanks,
Mark.
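
Mark's suggestion could plausibly build on the existing
arch_stack_walk() interface (CONFIG_ARCH_STACKWALK), whose consume
callback can already terminate a walk early by returning false; a
hypothetical option to stop at an exception boundary would hook into
the same early-exit path. A minimal sketch of that callback shape,
with illustrative names:

#include <linux/stacktrace.h>

struct walk_cookie {
	unsigned long *store;
	unsigned int size;
	unsigned int len;
};

/* Store one entry per frame; returning false stops the walk early. */
static bool save_entry(void *cookie, unsigned long addr)
{
	struct walk_cookie *c = cookie;

	if (c->len >= c->size)
		return false;
	c->store[c->len++] = addr;
	return true;
}

static unsigned int save_current_stack(unsigned long *store,
				       unsigned int size)
{
	struct walk_cookie c = { .store = store, .size = size, .len = 0 };

	/* Unwind the current task with the frame-pointer unwinder. */
	arch_stack_walk(save_entry, &c, current, NULL);
	return c.len;
}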

> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index d9079ec11f31..23b30fa6e270 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -30,6 +30,68 @@
>  #include "kasan.h"
>  #include "../slab.h"
>  
> +#ifdef CONFIG_SHADOW_CALL_STACK
> +#include <linux/scs.h>
> +#include <asm/scs.h>
> +
> +/*
> + * Collect the stack trace from the Shadow Call Stack in a best-effort manner:
> + *
> + * - Do not collect the task part of the stack trace when in a hardirq.
> + * - Do not collect stack traces from other exception levels like SDEI.
> + * - Do not recover frames modified by KRETPROBES or by FTRACE.
> + *
> + * Note that marking the function with __noscs leads to unacceptable
> + * performance impact, as helper functions stop being inlined.
> + */
> +static inline int stack_trace_save_shadow(unsigned long *store,
> +					  unsigned int size)
> +{
> +	unsigned long *scs_top, *scs_base, *frame;
> +	unsigned int len = 0;
> +
> +	/* Get the SCS base. */
> +	if (in_task() || in_serving_softirq()) {
> +		/* Softirqs reuse the task SCS area. */
> +		scs_base = task_scs(current);
> +	} else if (in_hardirq()) {
> +		/* Hardirqs use a per-CPU SCS area. */
> +		scs_base = *this_cpu_ptr(&irq_shadow_call_stack_ptr);
> +	} else {
> +		/* Ignore other exception levels. */
> +		return 0;
> +	}
> +
> +	/*
> +	 * Get the SCS pointer.
> +	 *
> +	 * Note that this assembly might be placed before the function's
> +	 * prologue. In this case, the last stack frame will be lost. This is
> +	 * acceptable: the lost frame will correspond to an internal KASAN
> +	 * function, which is not relevant to identify the external call site.
> +	 */
> +	asm volatile("mov %0, x18" : "=&r" (scs_top));
> +
> +	/* The top SCS slot is empty. */
> +	scs_top -= 1;
> +
> +	for (frame = scs_top; frame >= scs_base; frame--) {
> +		if (len >= size)
> +			break;
> +		/* Do not leak PTR_AUTH tags in stack traces. */
> +		store[len++] = ptrauth_strip_insn_pac(*frame);
> +	}
> +
> +	return len;
> +}
> +#else /* CONFIG_SHADOW_CALL_STACK */
> +static inline int stack_trace_save_shadow(unsigned long *store,
> +					  unsigned int size)
> +{
> +	return -ENOSYS;
> +}
> +#endif /* CONFIG_SHADOW_CALL_STACK */
> +
>  depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
>  {
>  	unsigned long entries[KASAN_STACK_DEPTH];
> -- 
> 2.25.1
>

Patch

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d9079ec11f31..23b30fa6e270 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -30,6 +30,68 @@ 
 #include "kasan.h"
 #include "../slab.h"
 
+#ifdef CONFIG_SHADOW_CALL_STACK
+#include <linux/scs.h>
+#include <asm/scs.h>
+
+/*
+ * Collect the stack trace from the Shadow Call Stack in a best-effort manner:
+ *
+ * - Do not collect the task part of the stack trace when in a hardirq.
+ * - Do not collect stack traces from other exception levels like SDEI.
+ * - Do not recover frames modified by KRETPROBES or by FTRACE.
+ *
+ * Note that marking the function with __noscs leads to unacceptable
+ * performance impact, as helper functions stop being inlined.
+ */
+static inline int stack_trace_save_shadow(unsigned long *store,
+					  unsigned int size)
+{
+	unsigned long *scs_top, *scs_base, *frame;
+	unsigned int len = 0;
+
+	/* Get the SCS base. */
+	if (in_task() || in_serving_softirq()) {
+		/* Softirqs reuse the task SCS area. */
+		scs_base = task_scs(current);
+	} else if (in_hardirq()) {
+		/* Hardirqs use a per-CPU SCS area. */
+		scs_base = *this_cpu_ptr(&irq_shadow_call_stack_ptr);
+	} else {
+		/* Ignore other exception levels. */
+		return 0;
+	}
+
+	/*
+	 * Get the SCS pointer.
+	 *
+	 * Note that this assembly might be placed before the function's
+	 * prologue. In this case, the last stack frame will be lost. This is
+	 * acceptable: the lost frame will correspond to an internal KASAN
+	 * function, which is not relevant to identify the external call site.
+	 */
+	asm volatile("mov %0, x18" : "=&r" (scs_top));
+
+	/* The top SCS slot is empty. */
+	scs_top -= 1;
+
+	for (frame = scs_top; frame >= scs_base; frame--) {
+		if (len >= size)
+			break;
+		/* Do not leak PTR_AUTH tags in stack traces. */
+		store[len++] = ptrauth_strip_insn_pac(*frame);
+	}
+
+	return len;
+}
+#else /* CONFIG_SHADOW_CALL_STACK */
+static inline int stack_trace_save_shadow(unsigned long *store,
+					  unsigned int size)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
 depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
 {
 	unsigned long entries[KASAN_STACK_DEPTH];