[v19,19/19] bpf: Use ftrace_get_symaddr() in get_entry_ip()

Message ID 173125395146.172790.15945895464150788842.stgit@devnote2 (mailing list archive)
State Not Applicable
Delegated to: BPF
Series tracing: fprobe: function_graph: Multi-function graph and fprobe on fgraph

Checks

Context Check Description
netdev/tree_selection success Guessing tree name failed - patch did not apply
bpf/vmtest-bpf-PR fail merge-conflict

Commit Message

Masami Hiramatsu (Google) Nov. 10, 2024, 3:52 p.m. UTC
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Rewrite get_entry_ip() to use the ftrace_get_symaddr() macro.

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
 Changes in v19:
  - Use ftrace_get_symaddr() instead of introducing new arch-dependent code.
  - Also, replace the x86 code with ftrace_get_symaddr(), which does the same
   thing.
---
 kernel/trace/bpf_trace.c |   19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)
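
For context, the new get_entry_ip() boils down to the helper below. The function body is the one from the diff further down; the instruction-layout comment and the x86/IBT offsets are an illustrative reading of the code being removed, not part of the patch itself.

/*
 * Illustration (x86 with CONFIG_X86_KERNEL_IBT=y, ENDBR_INSN_SIZE == 4):
 *
 *   <sym>:       endbr64            <- symbol address, what BPF wants
 *   <sym> + 4:   call __fentry__    <- fentry_ip seen by the handler
 *
 * ftrace_get_symaddr() is expected to map fentry_ip back to <sym>, and to
 * return 0 when it cannot, hence the fallback to the raw fentry_ip.
 */
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	unsigned long ret = ftrace_get_symaddr(fentry_ip);

	return ret ? : fentry_ip;	/* GNU "?:": use fentry_ip when ret is 0 */
}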

Comments

Masami Hiramatsu (Google) Dec. 9, 2024, 9:29 a.m. UTC | #1
On Mon, 11 Nov 2024 00:52:31 +0900
"Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:

> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> Rewrite get_entry_ip() to use the ftrace_get_symaddr() macro.

I found a root problem with this patch: get_entry_ip() is used not
only for fprobe (kprobe_multi) but also for kprobes, and that is wrong.
On x86, kprobes and ftrace (fentry) share the same restriction, so both
need to skip ENDBR. But on arm64, ftrace_get_symaddr() is only for
fprobe, and kp->addr should point to the symbol address.

So what I should do is use the `ftrace_get_symaddr()` version for
fprobe (kprobe_multi) and keep the original function for kprobes.

Let me fix that.

Thanks,
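
A minimal sketch of that split, for illustration only: kprobes would keep the
CONFIG_X86_KERNEL_IBT-aware get_entry_ip() that this patch removed, and only
the fprobe (kprobe_multi) path would go through ftrace_get_symaddr(). The
helper name get_fprobe_entry_ip() is hypothetical, not taken from the actual
follow-up patch.

/* fprobe (kprobe_multi) side only; kprobes keep the original
 * IBT-aware get_entry_ip() shown in the quoted diff below.
 */
static unsigned long get_fprobe_entry_ip(unsigned long fentry_ip)
{
	unsigned long symaddr = ftrace_get_symaddr(fentry_ip);

	/* Fall back to the raw fentry ip if the arch cannot resolve it. */
	return symaddr ? : fentry_ip;
}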

> 
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> ---
>  Changes in v19:
>   - Use ftrace_get_symaddr() instead of introducing new arch-dependent code.
>   - Also, replace the x86 code with ftrace_get_symaddr(), which does the same
>    thing.
> ---
>  kernel/trace/bpf_trace.c |   19 ++-----------------
>  1 file changed, 2 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 1532e9172bf9..e848a782bc8d 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1024,27 +1024,12 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
>  	.arg1_type	= ARG_PTR_TO_CTX,
>  };
>  
> -#ifdef CONFIG_X86_KERNEL_IBT
>  static unsigned long get_entry_ip(unsigned long fentry_ip)
>  {
> -	u32 instr;
> +	unsigned long ret = ftrace_get_symaddr(fentry_ip);
>  
> -	/* We want to be extra safe in case entry ip is on the page edge,
> -	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
> -	 */
> -	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
> -		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
> -			return fentry_ip;
> -	} else {
> -		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
> -	}
> -	if (is_endbr(instr))
> -		fentry_ip -= ENDBR_INSN_SIZE;
> -	return fentry_ip;
> +	return ret ? : fentry_ip;
>  }
> -#else
> -#define get_entry_ip(fentry_ip) fentry_ip
> -#endif
>  
>  BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
>  {
> 
>

Patch

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 1532e9172bf9..e848a782bc8d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1024,27 +1024,12 @@  static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
-#ifdef CONFIG_X86_KERNEL_IBT
 static unsigned long get_entry_ip(unsigned long fentry_ip)
 {
-	u32 instr;
+	unsigned long ret = ftrace_get_symaddr(fentry_ip);
 
-	/* We want to be extra safe in case entry ip is on the page edge,
-	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
-	 */
-	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
-		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
-			return fentry_ip;
-	} else {
-		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
-	}
-	if (is_endbr(instr))
-		fentry_ip -= ENDBR_INSN_SIZE;
-	return fentry_ip;
+	return ret ? : fentry_ip;
 }
-#else
-#define get_entry_ip(fentry_ip) fentry_ip
-#endif
 
 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {