
[v2] uprobes: make trace_uprobe->nhit counter a per-CPU one

Message ID: 20240809192357.4061484-1-andrii@kernel.org
State: Not Applicable

Commit Message

Andrii Nakryiko Aug. 9, 2024, 7:23 p.m. UTC
trace_uprobe->nhit counter is not incremented atomically, so its value
is questionable when the uprobe is hit on multiple CPUs simultaneously.

Also, doing this shared counter increment across many CPUs causes heavy
cache line bouncing, limiting uprobe/uretprobe performance scaling with
number of CPUs.

Solve both problems by making this a per-CPU counter.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 kernel/trace/trace_uprobe.c | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)
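
For context, the patch follows the standard per-CPU counter lifecycle:
writers touch only their own CPU's cache line via this_cpu_inc(), and
readers pay the cross-CPU cost once per read when they fold the slots.
A minimal sketch with hypothetical names (hit_counter and its helpers
are illustrative, not code from the patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>

struct hit_counter {
	unsigned long __percpu	*nhits;
};

static int hit_counter_init(struct hit_counter *hc)
{
	/* one zero-initialized unsigned long per possible CPU */
	hc->nhits = alloc_percpu(unsigned long);
	return hc->nhits ? 0 : -ENOMEM;
}

static void hit_counter_inc(struct hit_counter *hc)
{
	/* preemption- and IRQ-safe update of this CPU's slot only */
	this_cpu_inc(*hc->nhits);
}

static unsigned long hit_counter_sum(struct hit_counter *hc)
{
	unsigned long sum = 0;
	int cpu;

	/* readers fold all slots; a slightly stale total is fine */
	for_each_possible_cpu(cpu)
		sum += READ_ONCE(*per_cpu_ptr(hc->nhits, cpu));
	return sum;
}

static void hit_counter_free(struct hit_counter *hc)
{
	free_percpu(hc->nhits);	/* NULL-safe, like kfree() */
}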

Comments

Masami Hiramatsu (Google) Aug. 13, 2024, 1:30 p.m. UTC | #1
On Fri,  9 Aug 2024 12:23:57 -0700
Andrii Nakryiko <andrii@kernel.org> wrote:

> trace_uprobe->nhit counter is not incremented atomically, so its value
> is questionable when the uprobe is hit on multiple CPUs simultaneously.
> 
> Also, doing this shared counter increment across many CPUs causes heavy
> cache line bouncing, limiting uprobe/uretprobe performance scaling with
> number of CPUs.
> 
> Solve both problems by making this a per-CPU counter.
> 

This looks good to me. I would like to pick this up into linux-trace/probes/for-next.

> @@ -62,7 +63,7 @@ struct trace_uprobe {
>  	struct uprobe			*uprobe;

BTW, what is this change? I couldn't cleanly apply this to v6.11-rc3.
Which tree are you working on? (Did I miss something?)

Thanks,

>  	unsigned long			offset;
>  	unsigned long			ref_ctr_offset;
> -	unsigned long			nhit;
> +	unsigned long __percpu		*nhits;
>  	struct trace_probe		tp;
>  };
>  
> @@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
>  	if (!tu)
>  		return ERR_PTR(-ENOMEM);
>  
> +	tu->nhits = alloc_percpu(unsigned long);
> +	if (!tu->nhits) {
> +		ret = -ENOMEM;
> +		goto error;
> +	}
> +
>  	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
>  	if (ret < 0)
>  		goto error;
> @@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
>  	return tu;
>  
>  error:
> +	free_percpu(tu->nhits);
>  	kfree(tu);
>  
>  	return ERR_PTR(ret);
> @@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
>  	path_put(&tu->path);
>  	trace_probe_cleanup(&tu->tp);
>  	kfree(tu->filename);
> +	free_percpu(tu->nhits);
>  	kfree(tu);
>  }
>  
> @@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
>  {
>  	struct dyn_event *ev = v;
>  	struct trace_uprobe *tu;
> +	unsigned long nhits;
> +	int cpu;
>  
>  	if (!is_trace_uprobe(ev))
>  		return 0;
>  
>  	tu = to_trace_uprobe(ev);
> +
> +	nhits = 0;
> +	for_each_possible_cpu(cpu) {
> +		nhits += READ_ONCE(*per_cpu_ptr(tu->nhits, cpu));
> +	}
> +
>  	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
> -			trace_probe_name(&tu->tp), tu->nhit);
> +		   trace_probe_name(&tu->tp), nhits);
>  	return 0;
>  }
>  
> @@ -1507,7 +1524,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
>  	int ret = 0;
>  
>  	tu = container_of(con, struct trace_uprobe, consumer);
> -	tu->nhit++;
> +
> +	this_cpu_inc(*tu->nhits);
>  
>  	udd.tu = tu;
>  	udd.bp_addr = instruction_pointer(regs);
> -- 
> 2.43.5
>
Oleg Nesterov Aug. 13, 2024, 2:50 p.m. UTC | #2
On 08/09, Andrii Nakryiko wrote:
>
> @@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
>  {
>  	struct dyn_event *ev = v;
>  	struct trace_uprobe *tu;
> +	unsigned long nhits;
> +	int cpu;
>
>  	if (!is_trace_uprobe(ev))
>  		return 0;
>
>  	tu = to_trace_uprobe(ev);
> +
> +	nhits = 0;
> +	for_each_possible_cpu(cpu) {
> +		nhits += READ_ONCE(*per_cpu_ptr(tu->nhits, cpu));

why not

		nhits += per_cpu(*tu->nhits, cpu);

?

See for example per_cpu_sum() or nr_processes(); per_cpu() should work just fine...
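
For reference, nr_processes() in kernel/fork.c sums a per-CPU counter
exactly this way. A sketch of the same loop rewritten with per_cpu(),
as suggested above (the helper name is hypothetical):

/*
 * Hypothetical helper, equivalent to the patch's loop: per_cpu(*ptr, cpu)
 * reads the same slot as *per_cpu_ptr(ptr, cpu), just more compactly.
 */
static unsigned long trace_uprobe_nhits_sum(struct trace_uprobe *tu)
{
	unsigned long nhits = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhits += per_cpu(*tu->nhits, cpu);
	return nhits;
}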

Other than that

Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Oleg Nesterov Aug. 13, 2024, 3:41 p.m. UTC | #3
On 08/13, Masami Hiramatsu wrote:
>
> > @@ -62,7 +63,7 @@ struct trace_uprobe {
> >  	struct uprobe			*uprobe;
>
> BTW, what is this change? I couldn't cleanly apply this to v6.11-rc3.
> Which tree are you working on? (Did I miss something?)

tip/perf/core

See https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/diff/kernel/trace/trace_uprobe.c?h=perf/core&id=3c83a9ad0295eb63bdeb81d821b8c3b9417fbcac

Oleg.
Andrii Nakryiko Aug. 13, 2024, 5:05 p.m. UTC | #4
On Tue, Aug 13, 2024 at 7:50 AM Oleg Nesterov <oleg@redhat.com> wrote:
>
> On 08/09, Andrii Nakryiko wrote:
> >
> > @@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
> >  {
> >       struct dyn_event *ev = v;
> >       struct trace_uprobe *tu;
> > +     unsigned long nhits;
> > +     int cpu;
> >
> >       if (!is_trace_uprobe(ev))
> >               return 0;
> >
> >       tu = to_trace_uprobe(ev);
> > +
> > +     nhits = 0;
> > +     for_each_possible_cpu(cpu) {
> > +             nhits += READ_ONCE(*per_cpu_ptr(tu->nhits, cpu));
>
> why not
>
>                 nhits += per_cpu(*tu->nhits, cpu);
>
> ?
>
> See for example per_cpu_sum() or nr_processes(); per_cpu() should work just fine...
>

I just monkeyed it from some existing code somewhere in the BPF code
base. I like per_cpu(); I'll send a v3 and rebase it onto the
linux-trace tree.

> Other than that
>
> Reviewed-by: Oleg Nesterov <oleg@redhat.com>
>
Masami Hiramatsu (Google) Aug. 25, 2024, 10:15 a.m. UTC | #5
On Tue, 13 Aug 2024 17:41:04 +0200
Oleg Nesterov <oleg@redhat.com> wrote:

> On 08/13, Masami Hiramatsu wrote:
> >
> > > @@ -62,7 +63,7 @@ struct trace_uprobe {
> > >  	struct uprobe			*uprobe;
> >
> > BTW, what is this change? I couldn't cleanly apply this to v6.11-rc3.
> > Which tree are you working on? (Did I miss something?)
> 
> tip/perf/core
> 
> See https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/diff/kernel/trace/trace_uprobe.c?h=perf/core&id=3c83a9ad0295eb63bdeb81d821b8c3b9417fbcac

OK, let me consider rebasing on tip/perf/core.

Thank you,

> 
> Oleg.
>
Andrii Nakryiko Aug. 26, 2024, 4:17 p.m. UTC | #6
On Sun, Aug 25, 2024 at 3:15 AM Masami Hiramatsu <mhiramat@kernel.org> wrote:
>
> On Tue, 13 Aug 2024 17:41:04 +0200
> Oleg Nesterov <oleg@redhat.com> wrote:
>
> > On 08/13, Masami Hiramatsu wrote:
> > >
> > > > @@ -62,7 +63,7 @@ struct trace_uprobe {
> > > >   struct uprobe                   *uprobe;
> > >
> > > BTW, what is this change? I couldn't cleanly apply this to v6.11-rc3.
> > > Which tree are you working on? (Did I miss something?)
> >
> > tip/perf/core
> >
> > See https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/diff/kernel/trace/trace_uprobe.c?h=perf/core&id=3c83a9ad0295eb63bdeb81d821b8c3b9417fbcac
>
> OK, let me consider rebasing on tip/perf/core.
>

Hey Masami,

I've posted v3 rebased onto linux-trace/probes/for-next, so you
shouldn't need to rebase anything just for this. See [0] for the
latest revision.

  [0] https://lore.kernel.org/linux-trace-kernel/20240813203409.3985398-1-andrii@kernel.org/

> Thank you,
>
> >
> > Oleg.
> >
>
>
> --
> Masami Hiramatsu (Google) <mhiramat@kernel.org>

Patch

diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 52e76a73fa7c..002f801a7ab4 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -17,6 +17,7 @@ 
 #include <linux/string.h>
 #include <linux/rculist.h>
 #include <linux/filter.h>
+#include <linux/percpu.h>
 
 #include "trace_dynevent.h"
 #include "trace_probe.h"
@@ -62,7 +63,7 @@  struct trace_uprobe {
 	struct uprobe			*uprobe;
 	unsigned long			offset;
 	unsigned long			ref_ctr_offset;
-	unsigned long			nhit;
+	unsigned long __percpu		*nhits;
 	struct trace_probe		tp;
 };
 
@@ -337,6 +338,12 @@  alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	if (!tu)
 		return ERR_PTR(-ENOMEM);
 
+	tu->nhits = alloc_percpu(unsigned long);
+	if (!tu->nhits) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
 	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
 	if (ret < 0)
 		goto error;
@@ -349,6 +356,7 @@  alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	return tu;
 
 error:
+	free_percpu(tu->nhits);
 	kfree(tu);
 
 	return ERR_PTR(ret);
@@ -362,6 +370,7 @@  static void free_trace_uprobe(struct trace_uprobe *tu)
 	path_put(&tu->path);
 	trace_probe_cleanup(&tu->tp);
 	kfree(tu->filename);
+	free_percpu(tu->nhits);
 	kfree(tu);
 }
 
@@ -815,13 +824,21 @@  static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
 	struct dyn_event *ev = v;
 	struct trace_uprobe *tu;
+	unsigned long nhits;
+	int cpu;
 
 	if (!is_trace_uprobe(ev))
 		return 0;
 
 	tu = to_trace_uprobe(ev);
+
+	nhits = 0;
+	for_each_possible_cpu(cpu) {
+		nhits += READ_ONCE(*per_cpu_ptr(tu->nhits, cpu));
+	}
+
 	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
-			trace_probe_name(&tu->tp), tu->nhit);
+		   trace_probe_name(&tu->tp), nhits);
 	return 0;
 }
 
@@ -1507,7 +1524,8 @@  static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 	int ret = 0;
 
 	tu = container_of(con, struct trace_uprobe, consumer);
-	tu->nhit++;
+
+	this_cpu_inc(*tu->nhits);
 
 	udd.tu = tu;
 	udd.bp_addr = instruction_pointer(regs);