[PATCHv2,perf/core,2/3] perf tools: Register fallback libbpf section handler

Message ID: 20220510074659.2557731-3-jolsa@kernel.org
State: Changes Requested
Delegated to: BPF
Series: perf tools: Fix prologue generation

Checks

Context                  Check    Description
bpf/vmtest-bpf-next-PR   fail     merge-conflict
netdev/tree_selection    success  Not a local patch, async

Commit Message

Jiri Olsa May 10, 2022, 7:46 a.m. UTC
Perf uses the section name to declare special kprobe arguments,
which no longer works with current libbpf: libbpf now either requires
a specific form of section name or lets the application register a
custom section handler.
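
For example, a perf kprobe program can declare the arguments to fetch
directly in its section name, in the style of perf's BPF prologue test
(a sketch; the probed function and fetched fields are illustrative):

  SEC("func=null_lseek file->f_mode offset orig")
  int bpf_func__null_lseek(void *ctx, int err, unsigned long f_mode,
                           unsigned long offset, unsigned long orig)
  {
          /* f_mode, offset and orig are filled in by the generated
           * prologue before the program body runs
           */
          return 0;
  }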

Add perf support to register a 'fallback' section handler that takes
care of perf kprobe programs. 'Fallback' means that it handles any
section definition besides the ones that libbpf handles itself.

The handler serves two purposes:
  - it allows perf programs to have special arguments in the section
    name
  - it allows perf to use a pre-load callback, where we can prepend
    init code (zeroing all argument registers) to each perf program

The second is an essential part of the new prologue generation code
coming in the following patch.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 tools/perf/util/bpf-loader.c | 47 ++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

Comments

Andrii Nakryiko May 10, 2022, 11:45 p.m. UTC | #1
On Tue, May 10, 2022 at 12:47 AM Jiri Olsa <jolsa@kernel.org> wrote:
> [...]
> diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
> index f8ad581ea247..2a2c9512c4e8 100644
> --- a/tools/perf/util/bpf-loader.c
> +++ b/tools/perf/util/bpf-loader.c
> @@ -86,6 +86,7 @@ bpf_perf_object__next(struct bpf_perf_object *prev)
>              (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
>
>  static bool libbpf_initialized;
> +static int libbpf_sec_handler;
>
>  static int bpf_perf_object__add(struct bpf_object *obj)
>  {
> @@ -99,12 +100,58 @@ static int bpf_perf_object__add(struct bpf_object *obj)
>         return perf_obj ? 0 : -ENOMEM;
>  }
>
> +static struct bpf_insn prologue_init_insn[] = {
> +       BPF_MOV64_IMM(BPF_REG_0, 0),
> +       BPF_MOV64_IMM(BPF_REG_1, 0),

R0 should be initialized before exit anyway, and R1 contains the
context, so it doesn't need initialization; I think you only need
R2-R5?

> [...]

Jiri Olsa May 11, 2022, 7:36 a.m. UTC | #2
On Tue, May 10, 2022 at 04:45:01PM -0700, Andrii Nakryiko wrote:
> On Tue, May 10, 2022 at 12:47 AM Jiri Olsa <jolsa@kernel.org> wrote:
> > [...]
> > +static struct bpf_insn prologue_init_insn[] = {
> > +       BPF_MOV64_IMM(BPF_REG_0, 0),
> > +       BPF_MOV64_IMM(BPF_REG_1, 0),
> 
> R0 should be initialized before exit anyway, and R1 contains the
> context, so it doesn't need initialization; I think you only need
> R2-R5?

ah right, I'll remove that

thanks,
jirka
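
With R0 and R1 dropped as suggested, the init array would shrink to
something like the following (a sketch of the agreed change, not the
actual follow-up patch):

  static struct bpf_insn prologue_init_insn[] = {
          /* R1 holds the context and R0 must be set before exit
           * anyway, so only the remaining argument registers need
           * zeroing.
           */
          BPF_MOV64_IMM(BPF_REG_2, 0),
          BPF_MOV64_IMM(BPF_REG_3, 0),
          BPF_MOV64_IMM(BPF_REG_4, 0),
          BPF_MOV64_IMM(BPF_REG_5, 0),
  };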


Patch

diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index f8ad581ea247..2a2c9512c4e8 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -86,6 +86,7 @@ bpf_perf_object__next(struct bpf_perf_object *prev)
 	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
 
 static bool libbpf_initialized;
+static int libbpf_sec_handler;
 
 static int bpf_perf_object__add(struct bpf_object *obj)
 {
@@ -99,12 +100,58 @@ static int bpf_perf_object__add(struct bpf_object *obj)
 	return perf_obj ? 0 : -ENOMEM;
 }
 
+static struct bpf_insn prologue_init_insn[] = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_1, 0),
+	BPF_MOV64_IMM(BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_MOV64_IMM(BPF_REG_4, 0),
+	BPF_MOV64_IMM(BPF_REG_5, 0),
+};
+
+static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
+				       struct bpf_prog_load_opts *opts __maybe_unused,
+				       long cookie __maybe_unused)
+{
+	size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
+	size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
+	const struct bpf_insn *orig_insn;
+	struct bpf_insn *insn;
+
+	/* prepend initialization code to program instructions */
+	orig_insn = bpf_program__insns(prog);
+	orig_insn_cnt = bpf_program__insn_cnt(prog);
+	init_size = init_size_cnt * sizeof(*insn);
+	orig_size = orig_insn_cnt * sizeof(*insn);
+
+	insn_cnt = orig_insn_cnt + init_size_cnt;
+	insn = malloc(insn_cnt * sizeof(*insn));
+	if (!insn)
+		return -ENOMEM;
+
+	memcpy(insn, prologue_init_insn, init_size);
+	memcpy((char *) insn + init_size, orig_insn, orig_size);
+	bpf_program__set_insns(prog, insn, insn_cnt);
+	return 0;
+}
+
 static int libbpf_init(void)
 {
+	LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
+		.prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
+	);
+
 	if (libbpf_initialized)
 		return 0;
 
 	libbpf_set_print(libbpf_perf_print);
+	libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
+							  0, &handler_opts);
+	if (libbpf_sec_handler < 0) {
+		pr_debug("bpf: failed to register libbpf section handler: %d\n",
+			 libbpf_sec_handler);
+		return -BPF_LOADER_ERRNO__INTERNAL;
+	}
 	libbpf_initialized = true;
 	return 0;
 }
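
The same mechanism is open to any libbpf user, not just perf. A
minimal standalone sketch of registering a fallback handler (assuming
libbpf >= 0.8, where libbpf_register_prog_handler() was introduced;
the callback body and printouts are illustrative):

  #include <stdio.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  /* Called for each matched program just before it is loaded. */
  static int prepare_load(struct bpf_program *prog,
                          struct bpf_prog_load_opts *opts, long cookie)
  {
          (void)opts; (void)cookie;       /* unused in this sketch */
          printf("about to load: %s\n", bpf_program__name(prog));
          return 0;
  }

  int main(void)
  {
          LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
                  .prog_prepare_load_fn = prepare_load,
          );
          int id;

          /* A NULL section name makes this the fallback handler: it
           * claims any SEC() string that no built-in libbpf handler
           * recognizes.
           */
          id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
                                            0, &opts);
          if (id < 0) {
                  fprintf(stderr, "failed to register handler: %d\n", id);
                  return 1;
          }

          /* ... bpf_object__open()/bpf_object__load() as usual ... */

          libbpf_unregister_prog_handler(id);
          return 0;
  }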