
[bpf-next,1/2] libbpf: add support to set kprobe/uprobe attach mode

Message ID 20230203031742.1730761-2-imagedong@tencent.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Series libbpf: allow users to set kprobe/uprobe attach mode

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 12 of 12 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning CHECK: Please use a blank line after function/struct/union/enum declarations
netdev/kdoc success Errors and warnings before: 110 this patch: 110
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 fail Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_progs on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-20 fail Logs for test_progs_no_alu32 on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc

Commit Message

Menglong Dong Feb. 3, 2023, 3:17 a.m. UTC
From: Menglong Dong <imagedong@tencent.com>

By default, libbpf attaches kprobe/uprobe eBPF programs in the most
recent mode supported by the kernel. This patch adds support for
letting users manually attach kprobes/uprobes in legacy or perf mode.

The kernel supports 3 modes for attaching kprobes/uprobes:

  LEGACY: create the perf event in the legacy way and don't use bpf_link
  PERF: create the perf event with perf_event_open() and don't use bpf_link
  LINK: create the perf event with perf_event_open() and use bpf_link

Users can now manually choose the mode with
bpf_program__attach_uprobe_opts()/bpf_program__attach_kprobe_opts().
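
For example, forcing perf mode for a kprobe could look roughly like the
sketch below. The program handle, target function and error handling are
placeholders; only the .mode field and the PROBE_MODE_* values come from
this patch.

	#include <errno.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	/* Minimal sketch: attach a kprobe program in PERF mode, i.e.
	 * perf_event_open() without bpf_link. "do_unlinkat" is just an
	 * example target function.
	 */
	static struct bpf_link *attach_in_perf_mode(const struct bpf_program *prog)
	{
		LIBBPF_OPTS(bpf_kprobe_opts, opts,
			.mode = PROBE_MODE_PERF,
		);
		struct bpf_link *link;

		link = bpf_program__attach_kprobe_opts(prog, "do_unlinkat", &opts);
		if (!link)
			/* libbpf 1.0+ returns NULL and sets errno on failure */
			fprintf(stderr, "attach failed: %d\n", -errno);
		return link;
	}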

Link: https://lore.kernel.org/bpf/20230113093427.1666466-1-imagedong@tencent.com/
Signed-off-by: Menglong Dong <imagedong@tencent.com>
---
 tools/lib/bpf/libbpf.c | 26 +++++++++++++++++++++++++-
 tools/lib/bpf/libbpf.h | 19 ++++++++++++++++---
 2 files changed, 41 insertions(+), 4 deletions(-)

Comments

Andrii Nakryiko Feb. 6, 2023, 7:58 p.m. UTC | #1
On Thu, Feb 2, 2023 at 7:18 PM <menglong8.dong@gmail.com> wrote:
>
> From: Menglong Dong <imagedong@tencent.com>
>
> By default, libbpf will attach the kprobe/uprobe eBPF program in the
> latest mode that supported by kernel. In this patch, we add the support
> to let users manually attach kprobe/uprobe in legacy or perf mode.
>
> There are 3 mode that supported by the kernel to attach kprobe/uprobe:
>
>   LEGACY: create perf event in legacy way and don't use bpf_link
>   PERF: create perf event with perf_event_open() and don't use bpf_link
>   LINK: create perf event with perf_event_open() and use bpf_link
>
> Users now can manually choose the mode with
> bpf_program__attach_uprobe_opts()/bpf_program__attach_kprobe_opts().
>
> Link: https://lore.kernel.org/bpf/20230113093427.1666466-1-imagedong@tencent.com/
> Signed-off-by: Menglong Dong <imagedong@tencent.com>
> ---
>  tools/lib/bpf/libbpf.c | 26 +++++++++++++++++++++++++-
>  tools/lib/bpf/libbpf.h | 19 ++++++++++++++++---
>  2 files changed, 41 insertions(+), 4 deletions(-)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index eed5cec6f510..0d20bf1ee301 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -9784,7 +9784,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
>         link->link.dealloc = &bpf_link_perf_dealloc;
>         link->perf_event_fd = pfd;
>
> -       if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
> +       if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !opts->no_link) {
>                 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
>                         .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
>
> @@ -10148,16 +10148,28 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
>         struct bpf_link *link;
>         size_t offset;
>         bool retprobe, legacy;
> +       enum probe_mode mode;
>         int pfd, err;
>
>         if (!OPTS_VALID(opts, bpf_kprobe_opts))
>                 return libbpf_err_ptr(-EINVAL);
>
> +       mode = OPTS_GET(opts, mode, PROBE_MODE_DEFAULT);
>         retprobe = OPTS_GET(opts, retprobe, false);
>         offset = OPTS_GET(opts, offset, 0);
>         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
>
>         legacy = determine_kprobe_perf_type() < 0;
> +       switch (mode) {
> +       case PROBE_MODE_LEGACY:
> +               legacy = true;
> +       case PROBE_MODE_PERF:
> +               pe_opts.no_link = true;
> +               break;
> +       default:
> +               break;
> +       }
> +
>         if (!legacy) {
>                 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
>                                             func_name, offset,
> @@ -10817,10 +10829,12 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
>         int pfd, err;
>         bool retprobe, legacy;
>         const char *func_name;
> +       enum probe_mode mode;
>
>         if (!OPTS_VALID(opts, bpf_uprobe_opts))
>                 return libbpf_err_ptr(-EINVAL);
>
> +       mode = OPTS_GET(opts, mode, PROBE_MODE_DEFAULT);
>         retprobe = OPTS_GET(opts, retprobe, false);
>         ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
>         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
> @@ -10849,6 +10863,16 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
>         }
>
>         legacy = determine_uprobe_perf_type() < 0;
> +       switch (mode) {
> +       case PROBE_MODE_LEGACY:
> +               legacy = true;
> +       case PROBE_MODE_PERF:
> +               pe_opts.no_link = true;
> +               break;
> +       default:
> +               break;
> +       }
> +

I think this is a good place to also return errors early if, say, user
requested LINK mode, but that mode is not supported by kernel. Instead
of returning some -ENOTSUP generic error, we can error out early?
Similar for PERF mode, if only legacy is supported, and so on. Similar
for attach_uprobe, of course.
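
For illustration, such an early check in bpf_program__attach_kprobe_opts()
might look roughly like the following. This is only a sketch of the
reviewer's suggestion; the error codes and exact placement are not part of
this patch.

	legacy = determine_kprobe_perf_type() < 0;
	switch (mode) {
	case PROBE_MODE_LEGACY:
		legacy = true;
		pe_opts.no_link = true;
		break;
	case PROBE_MODE_PERF:
		/* perf_event_open() is required; fail fast if only legacy works */
		if (legacy)
			return libbpf_err_ptr(-ENOTSUP);
		pe_opts.no_link = true;
		break;
	case PROBE_MODE_LINK:
		/* bpf_link is required; fail fast if the kernel can't create one */
		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
			return libbpf_err_ptr(-ENOTSUP);
		break;
	case PROBE_MODE_DEFAULT:
		break;
	default:
		return libbpf_err_ptr(-EINVAL);
	}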

>         if (!legacy) {
>                 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
>                                             func_offset, pid, ref_ctr_off);
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 8777ff21ea1d..7fb474e036a3 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -451,8 +451,10 @@ struct bpf_perf_event_opts {
>         size_t sz;
>         /* custom user-provided value fetchable through bpf_get_attach_cookie() */
>         __u64 bpf_cookie;
> +       /* don't use bpf_link when attach eBPF pprogram */

typo: pprogram

> +       bool no_link;

I've been struggling with this "no_link" name a bit. It is quite
confusing considering that from libbpf's API side we do return `struct
bpf_link`. So I'm thinking that maybe "force_ioctl_attach" would be a
better way to describe it (and will be scary enough for people not
knowing what this is about to not set it to true?)

Also, we'll need size_t: 0 at the end to avoid uninitialized padding
issues (like we have in kprobe_opts and uprobe_opts)
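
Putting both suggestions together, the struct might end up roughly as
below; the force_ioctl_attach name is only the reviewer's proposal at this
point, not the final API.

	struct bpf_perf_event_opts {
		/* size of this struct, for forward/backward compatibility */
		size_t sz;
		/* custom user-provided value fetchable through bpf_get_attach_cookie() */
		__u64 bpf_cookie;
		/* don't use BPF link when attaching the eBPF program */
		bool force_ioctl_attach;
		/* zero-width bitfield so trailing padding is never left uninitialized */
		size_t :0;
	};
	#define bpf_perf_event_opts__last_field force_ioctl_attach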

>  };
> -#define bpf_perf_event_opts__last_field bpf_cookie
> +#define bpf_perf_event_opts__last_field no_link
>
>  LIBBPF_API struct bpf_link *
>  bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
> @@ -461,6 +463,13 @@ LIBBPF_API struct bpf_link *
>  bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
>                                     const struct bpf_perf_event_opts *opts);
>
> +enum probe_mode {

shall we call it probe_attach_mode?

also let's elaborate a bit more in doc comment that specifying mode
will force libbpf to use that, but if kernel doesn't support it --
then we'll error out.

> +       PROBE_MODE_DEFAULT = 0, /* latest supported by kernel */
> +       PROBE_MODE_LEGACY,
> +       PROBE_MODE_PERF,
> +       PROBE_MODE_LINK,
> +};
> +
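
A rough sketch of the renamed enum with an expanded doc comment, per the
two suggestions above; the naming is only the reviewer's proposal and the
final form is up to the next revision of the patch.

	/* If a specific attach mode is requested, libbpf is forced to use it
	 * and errors out if the running kernel does not support that mode.
	 */
	enum probe_attach_mode {
		/* attach probe in the latest mode supported by the kernel */
		PROBE_ATTACH_MODE_DEFAULT = 0,
		/* attach kprobe/uprobe in the legacy way, without bpf_link */
		PROBE_ATTACH_MODE_LEGACY,
		/* create perf event with perf_event_open() but don't use bpf_link */
		PROBE_ATTACH_MODE_PERF,
		/* attach via bpf_link; requires kernel support for BPF perf links */
		PROBE_ATTACH_MODE_LINK,
	};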
>  struct bpf_kprobe_opts {
>         /* size of this struct, for forward/backward compatiblity */
>         size_t sz;
> @@ -470,9 +479,11 @@ struct bpf_kprobe_opts {
>         size_t offset;
>         /* kprobe is return probe */
>         bool retprobe;
> +       /* kprobe attach mode */
> +       enum probe_mode mode;

nit: mode -> attach_mode?

>         size_t :0;
>  };
> -#define bpf_kprobe_opts__last_field retprobe
> +#define bpf_kprobe_opts__last_field mode
>
>  LIBBPF_API struct bpf_link *
>  bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
> @@ -570,9 +581,11 @@ struct bpf_uprobe_opts {
>          * binary_path.
>          */
>         const char *func_name;
> +       /* uprobe attach mode */
> +       enum probe_mode mode;
>         size_t :0;
>  };
> -#define bpf_uprobe_opts__last_field func_name
> +#define bpf_uprobe_opts__last_field mode

ditto

>
>  /**
>   * @brief **bpf_program__attach_uprobe()** attaches a BPF program
> --
> 2.39.0
>
Menglong Dong Feb. 7, 2023, 2:23 a.m. UTC | #2
On Tue, Feb 7, 2023 at 3:59 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Thu, Feb 2, 2023 at 7:18 PM <menglong8.dong@gmail.com> wrote:
> >
> > From: Menglong Dong <imagedong@tencent.com>
> >
> > By default, libbpf will attach the kprobe/uprobe eBPF program in the
> > latest mode that supported by kernel. In this patch, we add the support
> > to let users manually attach kprobe/uprobe in legacy or perf mode.
> >
> > There are 3 mode that supported by the kernel to attach kprobe/uprobe:
> >
> >   LEGACY: create perf event in legacy way and don't use bpf_link
> >   PERF: create perf event with perf_event_open() and don't use bpf_link
> >   LINK: create perf event with perf_event_open() and use bpf_link
> >
> > Users now can manually choose the mode with
> > bpf_program__attach_uprobe_opts()/bpf_program__attach_kprobe_opts().
> >
> > Link: https://lore.kernel.org/bpf/20230113093427.1666466-1-imagedong@tencent.com/
> > Signed-off-by: Menglong Dong <imagedong@tencent.com>
> > ---
> >  tools/lib/bpf/libbpf.c | 26 +++++++++++++++++++++++++-
> >  tools/lib/bpf/libbpf.h | 19 ++++++++++++++++---
> >  2 files changed, 41 insertions(+), 4 deletions(-)
> >
> > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > index eed5cec6f510..0d20bf1ee301 100644
> > --- a/tools/lib/bpf/libbpf.c
> > +++ b/tools/lib/bpf/libbpf.c
> > @@ -9784,7 +9784,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
> >         link->link.dealloc = &bpf_link_perf_dealloc;
> >         link->perf_event_fd = pfd;
> >
> > -       if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
> > +       if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !opts->no_link) {
> >                 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
> >                         .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
> >
> > @@ -10148,16 +10148,28 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
> >         struct bpf_link *link;
> >         size_t offset;
> >         bool retprobe, legacy;
> > +       enum probe_mode mode;
> >         int pfd, err;
> >
> >         if (!OPTS_VALID(opts, bpf_kprobe_opts))
> >                 return libbpf_err_ptr(-EINVAL);
> >
> > +       mode = OPTS_GET(opts, mode, PROBE_MODE_DEFAULT);
> >         retprobe = OPTS_GET(opts, retprobe, false);
> >         offset = OPTS_GET(opts, offset, 0);
> >         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
> >
> >         legacy = determine_kprobe_perf_type() < 0;
> > +       switch (mode) {
> > +       case PROBE_MODE_LEGACY:
> > +               legacy = true;
> > +       case PROBE_MODE_PERF:
> > +               pe_opts.no_link = true;
> > +               break;
> > +       default:
> > +               break;
> > +       }
> > +
> >         if (!legacy) {
> >                 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
> >                                             func_name, offset,
> > @@ -10817,10 +10829,12 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
> >         int pfd, err;
> >         bool retprobe, legacy;
> >         const char *func_name;
> > +       enum probe_mode mode;
> >
> >         if (!OPTS_VALID(opts, bpf_uprobe_opts))
> >                 return libbpf_err_ptr(-EINVAL);
> >
> > +       mode = OPTS_GET(opts, mode, PROBE_MODE_DEFAULT);
> >         retprobe = OPTS_GET(opts, retprobe, false);
> >         ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
> >         pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
> > @@ -10849,6 +10863,16 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
> >         }
> >
> >         legacy = determine_uprobe_perf_type() < 0;
> > +       switch (mode) {
> > +       case PROBE_MODE_LEGACY:
> > +               legacy = true;
> > +       case PROBE_MODE_PERF:
> > +               pe_opts.no_link = true;
> > +               break;
> > +       default:
> > +               break;
> > +       }
> > +
>
> I think this is a good place to also return errors early if, say, user
> requested LINK mode, but that mode is not supported by kernel. Instead
> of returning some -ENOTSUP generic error, we can error out early?
> Similar for PERF mode, if only legacy is supported, and so on. Similar
> for attach_uprobe, of course.
>

Yes, sounds great.

> >         if (!legacy) {
> >                 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
> >                                             func_offset, pid, ref_ctr_off);
> > diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> > index 8777ff21ea1d..7fb474e036a3 100644
> > --- a/tools/lib/bpf/libbpf.h
> > +++ b/tools/lib/bpf/libbpf.h
> > @@ -451,8 +451,10 @@ struct bpf_perf_event_opts {
> >         size_t sz;
> >         /* custom user-provided value fetchable through bpf_get_attach_cookie() */
> >         __u64 bpf_cookie;
> > +       /* don't use bpf_link when attach eBPF pprogram */
>
> typo: pprogram

Oops, got it!

>
> > +       bool no_link;
>
> I've been struggling with this "no_link" name a bit. It is quite
> confusing considering that from libbpf's API side we do return `struct
> bpf_link`. So I'm thinking that maybe "force_ioctl_attach" would be a
> better way to describe it (and will be scary enough for people not
> knowing what this is about to not set it to true?)
>

Yeah, "force_ioctl_attach" sounds more appropriate.

> Also, we'll need size_t: 0 at the end to avoid uninitialized padding
> issues (like we have in kprobe_opts and uprobe_opts)
>
> >  };
> > -#define bpf_perf_event_opts__last_field bpf_cookie
> > +#define bpf_perf_event_opts__last_field no_link
> >
> >  LIBBPF_API struct bpf_link *
> >  bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
> > @@ -461,6 +463,13 @@ LIBBPF_API struct bpf_link *
> >  bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
> >                                     const struct bpf_perf_event_opts *opts);
> >
> > +enum probe_mode {
>
> shall we call it probe_attach_mode?
>
> also let's elaborate a bit more in doc comment that specifying mode
> will force libbpf to use that, but if kernel doesn't support it --
> then we'll error out.
>

OK.

> > +       PROBE_MODE_DEFAULT = 0, /* latest supported by kernel */
> > +       PROBE_MODE_LEGACY,
> > +       PROBE_MODE_PERF,
> > +       PROBE_MODE_LINK,
> > +};
> > +
> >  struct bpf_kprobe_opts {
> >         /* size of this struct, for forward/backward compatiblity */
> >         size_t sz;
> > @@ -470,9 +479,11 @@ struct bpf_kprobe_opts {
> >         size_t offset;
> >         /* kprobe is return probe */
> >         bool retprobe;
> > +       /* kprobe attach mode */
> > +       enum probe_mode mode;
>
> nit: mode -> attach_mode?
>
> >         size_t :0;
> >  };
> > -#define bpf_kprobe_opts__last_field retprobe
> > +#define bpf_kprobe_opts__last_field mode
> >
> >  LIBBPF_API struct bpf_link *
> >  bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
> > @@ -570,9 +581,11 @@ struct bpf_uprobe_opts {
> >          * binary_path.
> >          */
> >         const char *func_name;
> > +       /* uprobe attach mode */
> > +       enum probe_mode mode;
> >         size_t :0;
> >  };
> > -#define bpf_uprobe_opts__last_field func_name
> > +#define bpf_uprobe_opts__last_field mode
>
> ditto
>

I'll fix these problems in the V2.

Thanks!
Menglong Dong

> >
> >  /**
> >   * @brief **bpf_program__attach_uprobe()** attaches a BPF program
> > --
> > 2.39.0
> >

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index eed5cec6f510..0d20bf1ee301 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9784,7 +9784,7 @@  struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
 	link->link.dealloc = &bpf_link_perf_dealloc;
 	link->perf_event_fd = pfd;
 
-	if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
+	if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !opts->no_link) {
 		DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
 			.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
 
@@ -10148,16 +10148,28 @@  bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 	struct bpf_link *link;
 	size_t offset;
 	bool retprobe, legacy;
+	enum probe_mode mode;
 	int pfd, err;
 
 	if (!OPTS_VALID(opts, bpf_kprobe_opts))
 		return libbpf_err_ptr(-EINVAL);
 
+	mode = OPTS_GET(opts, mode, PROBE_MODE_DEFAULT);
 	retprobe = OPTS_GET(opts, retprobe, false);
 	offset = OPTS_GET(opts, offset, 0);
 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
 
 	legacy = determine_kprobe_perf_type() < 0;
+	switch (mode) {
+	case PROBE_MODE_LEGACY:
+		legacy = true;
+	case PROBE_MODE_PERF:
+		pe_opts.no_link = true;
+		break;
+	default:
+		break;
+	}
+
 	if (!legacy) {
 		pfd = perf_event_open_probe(false /* uprobe */, retprobe,
 					    func_name, offset,
@@ -10817,10 +10829,12 @@  bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 	int pfd, err;
 	bool retprobe, legacy;
 	const char *func_name;
+	enum probe_mode mode;
 
 	if (!OPTS_VALID(opts, bpf_uprobe_opts))
 		return libbpf_err_ptr(-EINVAL);
 
+	mode = OPTS_GET(opts, mode, PROBE_MODE_DEFAULT);
 	retprobe = OPTS_GET(opts, retprobe, false);
 	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
@@ -10849,6 +10863,16 @@  bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 	}
 
 	legacy = determine_uprobe_perf_type() < 0;
+	switch (mode) {
+	case PROBE_MODE_LEGACY:
+		legacy = true;
+	case PROBE_MODE_PERF:
+		pe_opts.no_link = true;
+		break;
+	default:
+		break;
+	}
+
 	if (!legacy) {
 		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
 					    func_offset, pid, ref_ctr_off);
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 8777ff21ea1d..7fb474e036a3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -451,8 +451,10 @@  struct bpf_perf_event_opts {
 	size_t sz;
 	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
 	__u64 bpf_cookie;
+	/* don't use bpf_link when attach eBPF pprogram */
+	bool no_link;
 };
-#define bpf_perf_event_opts__last_field bpf_cookie
+#define bpf_perf_event_opts__last_field no_link
 
 LIBBPF_API struct bpf_link *
 bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
@@ -461,6 +463,13 @@  LIBBPF_API struct bpf_link *
 bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
 				    const struct bpf_perf_event_opts *opts);
 
+enum probe_mode {
+	PROBE_MODE_DEFAULT = 0, /* latest supported by kernel */
+	PROBE_MODE_LEGACY,
+	PROBE_MODE_PERF,
+	PROBE_MODE_LINK,
+};
+
 struct bpf_kprobe_opts {
 	/* size of this struct, for forward/backward compatiblity */
 	size_t sz;
@@ -470,9 +479,11 @@  struct bpf_kprobe_opts {
 	size_t offset;
 	/* kprobe is return probe */
 	bool retprobe;
+	/* kprobe attach mode */
+	enum probe_mode mode;
 	size_t :0;
 };
-#define bpf_kprobe_opts__last_field retprobe
+#define bpf_kprobe_opts__last_field mode
 
 LIBBPF_API struct bpf_link *
 bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
@@ -570,9 +581,11 @@  struct bpf_uprobe_opts {
 	 * binary_path.
 	 */
 	const char *func_name;
+	/* uprobe attach mode */
+	enum probe_mode mode;
 	size_t :0;
 };
-#define bpf_uprobe_opts__last_field func_name
+#define bpf_uprobe_opts__last_field mode
 
 /**
  * @brief **bpf_program__attach_uprobe()** attaches a BPF program