
[1/2] arm64: stacktrace: factor out kernel unwind state

Message ID 20231124110511.2795958-2-mark.rutland@arm.com (mailing list archive)
State New, archived
Series arm64: stacktrace: add kunwind_stack_walk()

Commit Message

Mark Rutland Nov. 24, 2023, 11:05 a.m. UTC
On arm64 we share some unwinding code between the regular kernel
unwinder and the KVM hyp unwinder. Some of this common code only matters
to the regular unwinder, e.g. the `kr_cur` and `task` fields in the
common struct unwind_state.

We're likely to add more state which only matters for regular kernel
unwinding (or only for hyp unwinding). In preparation for such changes,
this patch factors out the kernel-specific state into a new struct
kunwind_state, and updates the kernel unwind code accordingly.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Puranjay Mohan <puranjay12@gmail.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/stacktrace/common.h |  19 +---
 arch/arm64/include/asm/stacktrace/nvhe.h   |   2 +-
 arch/arm64/kernel/stacktrace.c             | 113 +++++++++++++--------
 3 files changed, 74 insertions(+), 60 deletions(-)
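
To make the split concrete, below is a small standalone sketch (not kernel code; the task pointer and init helper are stand-ins) of the embedding pattern the patch adopts: kernel-only state wraps the shared unwind_state and reaches the common fields through ->common.

#include <stdio.h>

/* Shared state, as kept by the common unwinder code (fields abridged). */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
};

/* Kernel-specific wrapper, mirroring the patch's struct kunwind_state. */
struct kunwind_state {
	struct unwind_state common;
	void *task;			/* stand-in for struct task_struct * */
};

/* Stand-in for kunwind_init(): set up common state, then kernel-only state. */
static void kunwind_init_sketch(struct kunwind_state *state, void *task)
{
	state->common.fp = 0;
	state->common.pc = 0;
	state->task = task;
}

int main(void)
{
	struct kunwind_state state;

	kunwind_init_sketch(&state, NULL);
	/* Shared fields are now reached via the embedded common state. */
	printf("fp=%#lx pc=%#lx\n", state.common.fp, state.common.pc);
	return 0;
}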

Comments

Kalesh Singh Nov. 27, 2023, 6:28 p.m. UTC | #1
On Fri, Nov 24, 2023 at 3:05 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On arm64 we share some unwinding code between the regular kernel
> unwinder and the KVM hyp unwinder. Some of this common code only matters
> to the regular unwinder, e.g. the `kr_cur` and `task` fields in the
> common struct unwind_state.
>
> We're likely to add more state which only matters for regular kernel
> unwinding (or only for hyp unwinding). In preparation for such changes,
> this patch factors out the kernel-specific state into a new struct
> kunwind_state, and updates the kernel unwind code accordingly.
>
> There should be no functional change as a result of this patch.

Reviewed-by: Kalesh Singh <kaleshsingh@google.com>

Thanks,
Kalesh

Puranjay Mohan Nov. 28, 2023, 8:52 a.m. UTC | #2
Mark Rutland <mark.rutland@arm.com> writes:

> On arm64 we share some unwinding code between the regular kernel
> unwinder and the KVM hyp unwinder. Some of this common code only matters
> to the regular unwinder, e.g. the `kr_cur` and `task` fields in the
> common struct unwind_state.
>
> We're likely to add more state which only matters for regular kernel
> unwinding (or only for hyp unwinding). In preparation for such changes,
> this patch factors out the kernel-specific state into a new struct
> kunwind_state, and updates the kernel unwind code accordingly.
>
> There should be no functional change as a result of this patch.
>

Reviewed-by: Puranjay Mohan <puranjay12@gmail.com>

Thanks,
Puranjay


Patch

diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 508f734de46ee..f63dc654e545f 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -9,7 +9,6 @@ 
 #ifndef __ASM_STACKTRACE_COMMON_H
 #define __ASM_STACKTRACE_COMMON_H
 
-#include <linux/kprobes.h>
 #include <linux/types.h>
 
 struct stack_info {
@@ -23,12 +22,6 @@  struct stack_info {
  * @fp:          The fp value in the frame record (or the real fp)
  * @pc:          The lr value in the frame record (or the real lr)
  *
- * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
- *               associated with the most recently encountered replacement lr
- *               value.
- *
- * @task:        The task being unwound.
- *
  * @stack:       The stack currently being unwound.
  * @stacks:      An array of stacks which can be unwound.
  * @nr_stacks:   The number of stacks in @stacks.
@@ -36,10 +29,6 @@  struct stack_info {
 struct unwind_state {
 	unsigned long fp;
 	unsigned long pc;
-#ifdef CONFIG_KRETPROBES
-	struct llist_node *kr_cur;
-#endif
-	struct task_struct *task;
 
 	struct stack_info stack;
 	struct stack_info *stacks;
@@ -66,14 +55,8 @@  static inline bool stackinfo_on_stack(const struct stack_info *info,
 	return true;
 }
 
-static inline void unwind_init_common(struct unwind_state *state,
-				      struct task_struct *task)
+static inline void unwind_init_common(struct unwind_state *state)
 {
-	state->task = task;
-#ifdef CONFIG_KRETPROBES
-	state->kr_cur = NULL;
-#endif
-
 	state->stack = stackinfo_get_unknown();
 }
 
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 25ab83a315a76..44759281d0d43 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -31,7 +31,7 @@  static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 					unsigned long fp,
 					unsigned long pc)
 {
-	unwind_init_common(state, NULL);
+	unwind_init_common(state);
 
 	state->fp = fp;
 	state->pc = pc;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 17f66a74c745c..aafc89192787a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -8,6 +8,7 @@ 
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -18,6 +19,31 @@ 
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
+/*
+ * Kernel unwind state
+ *
+ * @common:      Common unwind state.
+ * @task:        The task being unwound.
+ * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
+ *               associated with the most recently encountered replacement lr
+ *               value.
+ */
+struct kunwind_state {
+	struct unwind_state common;
+	struct task_struct *task;
+#ifdef CONFIG_KRETPROBES
+	struct llist_node *kr_cur;
+#endif
+};
+
+static __always_inline void
+kunwind_init(struct kunwind_state *state,
+	     struct task_struct *task)
+{
+	unwind_init_common(&state->common);
+	state->task = task;
+}
+
 /*
  * Start an unwind from a pt_regs.
  *
@@ -26,13 +52,13 @@ 
  * The regs must be on a stack currently owned by the calling task.
  */
 static __always_inline void
-unwind_init_from_regs(struct unwind_state *state,
-		      struct pt_regs *regs)
+kunwind_init_from_regs(struct kunwind_state *state,
+		       struct pt_regs *regs)
 {
-	unwind_init_common(state, current);
+	kunwind_init(state, current);
 
-	state->fp = regs->regs[29];
-	state->pc = regs->pc;
+	state->common.fp = regs->regs[29];
+	state->common.pc = regs->pc;
 }
 
 /*
@@ -44,12 +70,12 @@  unwind_init_from_regs(struct unwind_state *state,
  * The function which invokes this must be noinline.
  */
 static __always_inline void
-unwind_init_from_caller(struct unwind_state *state)
+kunwind_init_from_caller(struct kunwind_state *state)
 {
-	unwind_init_common(state, current);
+	kunwind_init(state, current);
 
-	state->fp = (unsigned long)__builtin_frame_address(1);
-	state->pc = (unsigned long)__builtin_return_address(0);
+	state->common.fp = (unsigned long)__builtin_frame_address(1);
+	state->common.pc = (unsigned long)__builtin_return_address(0);
 }
 
 /*
@@ -63,35 +89,38 @@  unwind_init_from_caller(struct unwind_state *state)
  * call this for the current task.
  */
 static __always_inline void
-unwind_init_from_task(struct unwind_state *state,
-		      struct task_struct *task)
+kunwind_init_from_task(struct kunwind_state *state,
+		       struct task_struct *task)
 {
-	unwind_init_common(state, task);
+	kunwind_init(state, task);
 
-	state->fp = thread_saved_fp(task);
-	state->pc = thread_saved_pc(task);
+	state->common.fp = thread_saved_fp(task);
+	state->common.pc = thread_saved_pc(task);
 }
 
 static __always_inline int
-unwind_recover_return_address(struct unwind_state *state)
+kunwind_recover_return_address(struct kunwind_state *state)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (state->task->ret_stack &&
-	    (state->pc == (unsigned long)return_to_handler)) {
+	    (state->common.pc == (unsigned long)return_to_handler)) {
 		unsigned long orig_pc;
-		orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
-						(void *)state->fp);
-		if (WARN_ON_ONCE(state->pc == orig_pc))
+		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
+						state->common.pc,
+						(void *)state->common.fp);
+		if (WARN_ON_ONCE(state->common.pc == orig_pc))
 			return -EINVAL;
-		state->pc = orig_pc;
+		state->common.pc = orig_pc;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_KRETPROBES
-	if (is_kretprobe_trampoline(state->pc)) {
-		state->pc = kretprobe_find_ret_addr(state->task,
-						    (void *)state->fp,
-						    &state->kr_cur);
+	if (is_kretprobe_trampoline(state->common.pc)) {
+		unsigned long orig_pc;
+		orig_pc = kretprobe_find_ret_addr(state->task,
+						  (void *)state->common.fp,
+						  &state->kr_cur);
+		state->common.pc = orig_pc;
 	}
 #endif /* CONFIG_KRETPROBES */
 
@@ -106,38 +135,38 @@  unwind_recover_return_address(struct unwind_state *state)
  * and the location (but not the fp value) of B.
  */
 static __always_inline int
-unwind_next(struct unwind_state *state)
+kunwind_next(struct kunwind_state *state)
 {
 	struct task_struct *tsk = state->task;
-	unsigned long fp = state->fp;
+	unsigned long fp = state->common.fp;
 	int err;
 
 	/* Final frame; nothing to unwind */
 	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
 		return -ENOENT;
 
-	err = unwind_next_frame_record(state);
+	err = unwind_next_frame_record(&state->common);
 	if (err)
 		return err;
 
-	state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
+	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);
 
-	return unwind_recover_return_address(state);
+	return kunwind_recover_return_address(state);
 }
 
 static __always_inline void
-unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
-       void *cookie)
+do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
+	   void *cookie)
 {
-	if (unwind_recover_return_address(state))
+	if (kunwind_recover_return_address(state))
 		return;
 
 	while (1) {
 		int ret;
 
-		if (!consume_entry(cookie, state->pc))
+		if (!consume_entry(cookie, state->common.pc))
 			break;
-		ret = unwind_next(state);
+		ret = kunwind_next(state);
 		if (ret < 0)
 			break;
 	}
@@ -190,22 +219,24 @@  noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 		STACKINFO_EFI,
 #endif
 	};
-	struct unwind_state state = {
-		.stacks = stacks,
-		.nr_stacks = ARRAY_SIZE(stacks),
+	struct kunwind_state state = {
+		.common = {
+			.stacks = stacks,
+			.nr_stacks = ARRAY_SIZE(stacks),
+		},
 	};
 
 	if (regs) {
 		if (task != current)
 			return;
-		unwind_init_from_regs(&state, regs);
+		kunwind_init_from_regs(&state, regs);
 	} else if (task == current) {
-		unwind_init_from_caller(&state);
+		kunwind_init_from_caller(&state);
 	} else {
-		unwind_init_from_task(&state, task);
+		kunwind_init_from_task(&state, task);
 	}
 
-	unwind(&state, consume_entry, cookie);
+	do_kunwind(&state, consume_entry, cookie);
 }
 
 static bool dump_backtrace_entry(void *arg, unsigned long where)
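
Note that the refactor leaves the arch_stack_walk() interface itself untouched, so existing callers are unaffected. For illustration, a minimal kernel-context sketch of such a caller follows (print_entry and dump_current_stack are hypothetical helpers, not part of the patch):

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/types.h>

/* Hypothetical consumer callback: print one entry, return true to keep walking. */
static bool print_entry(void *cookie, unsigned long pc)
{
	pr_info("%pS\n", (void *)pc);
	return true;
}

/* Hypothetical caller: walk the current task's stack, with no pt_regs. */
static void dump_current_stack(void)
{
	arch_stack_walk(print_entry, NULL, current, NULL);
}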