[bpf-next] libbpf: add libbpf_load_vmlinux_btf/libbpf_load_module_btf APIs

Message ID 20210724051256.1629110-1-hengqi.chen@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series [bpf-next] libbpf: add libbpf_load_vmlinux_btf/libbpf_load_module_btf APIs

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 4 maintainers not CCed: netdev@vger.kernel.org kafai@fb.com songliubraving@fb.com kpsingh@kernel.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 88 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success

Commit Message

Hengqi Chen July 24, 2021, 5:12 a.m. UTC
Add libbpf_load_vmlinux_btf/libbpf_load_module_btf APIs.
This is part of the libbpf v1.0. [1]

[1] https://github.com/libbpf/libbpf/issues/280

Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
---
 tools/lib/bpf/btf.c      | 24 +++++++++++++++++++++++-
 tools/lib/bpf/btf.h      |  2 ++
 tools/lib/bpf/libbpf.c   |  8 ++++----
 tools/lib/bpf/libbpf.map |  2 ++
 4 files changed, 31 insertions(+), 5 deletions(-)

Comments

Andrii Nakryiko July 26, 2021, 10:49 p.m. UTC | #1
On Fri, Jul 23, 2021 at 10:13 PM Hengqi Chen <hengqi.chen@gmail.com> wrote:
>
> Add libbpf_load_vmlinux_btf/libbpf_load_module_btf APIs.
> This is part of the libbpf v1.0. [1]
>
> [1] https://github.com/libbpf/libbpf/issues/280

Saying it's part of the libbpf 1.0 effort and giving a link to a
GitHub issue is not really a sufficient commit message. Please expand
on what you are doing in the patch and why.

>
> Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
> ---
>  tools/lib/bpf/btf.c      | 24 +++++++++++++++++++++++-
>  tools/lib/bpf/btf.h      |  2 ++
>  tools/lib/bpf/libbpf.c   |  8 ++++----
>  tools/lib/bpf/libbpf.map |  2 ++
>  4 files changed, 31 insertions(+), 5 deletions(-)
>
> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> index b46760b93bb4..414e1c5635ef 100644
> --- a/tools/lib/bpf/btf.c
> +++ b/tools/lib/bpf/btf.c
> @@ -4021,7 +4021,7 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
>                  */
>                 if (d->hypot_adjust_canon)
>                         continue;
> -
> +
>                 if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
>                         d->map[t_id] = c_id;
>
> @@ -4395,6 +4395,11 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
>   * data out of it to use for target BTF.
>   */
>  struct btf *libbpf_find_kernel_btf(void)
> +{
> +       return libbpf_load_vmlinux_btf();
> +}
> +
> +struct btf *libbpf_load_vmlinux_btf(void)
>  {
>         struct {
>                 const char *path_fmt;
> @@ -4440,6 +4445,23 @@ struct btf *libbpf_find_kernel_btf(void)
>         return libbpf_err_ptr(-ESRCH);
>  }
>
> +struct btf *libbpf_load_module_btf(const char *mod)

So we probably need to allow the user to pre-load and re-use vmlinux
BTF for efficiency, especially if they have some use-case that loads a
lot of BTFs.

> +{
> +       char path[80];
> +       struct btf *base;
> +       int err;
> +
> +       base = libbpf_load_vmlinux_btf();
> +       err = libbpf_get_error(base);
> +       if (err) {
> +               pr_warn("Error loading vmlinux BTF: %d\n", err);
> +               return base;

libbpf_err_ptr() needs to be used here, since pr_warn() could have
destroyed errno already.

> +       }
> +
> +       snprintf(path, sizeof(path), "/sys/kernel/btf/%s", mod);
> +       return btf__parse_split(path, base);

so who's freeing base BTF in this case?

> +}
> +
>  int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
>  {
>         int i, n, err;

[...]
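
To make the two inline comments above concrete, here is a minimal
sketch (not part of the posted patch) of libbpf_load_module_btf() with
the error paths adjusted: libbpf_err_ptr() re-sets errno after
pr_warn(), and the internally loaded base BTF is freed when the split
parse fails. On success the base still has to outlive the returned
module BTF, which is exactly the ownership problem raised above.

struct btf *libbpf_load_module_btf(const char *mod)
{
	char path[80];
	struct btf *base, *mod_btf;
	int err;

	base = libbpf_load_vmlinux_btf();
	err = libbpf_get_error(base);
	if (err) {
		pr_warn("Error loading vmlinux BTF: %d\n", err);
		/* pr_warn() may have clobbered errno, re-set it from err */
		return libbpf_err_ptr(err);
	}

	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", mod);
	mod_btf = btf__parse_split(path, base);
	err = libbpf_get_error(mod_btf);
	if (err) {
		/* nothing references base anymore, free it to avoid a leak */
		btf__free(base);
		return libbpf_err_ptr(err);
	}

	/* on success, mod_btf keeps referencing base, which the caller
	 * never sees and therefore cannot free
	 */
	return mod_btf;
}
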
Hengqi Chen July 27, 2021, 1:12 a.m. UTC | #2
On 7/27/21 6:49 AM, Andrii Nakryiko wrote:
> On Fri, Jul 23, 2021 at 10:13 PM Hengqi Chen <hengqi.chen@gmail.com> wrote:
>>
>> Add libbpf_load_vmlinux_btf/libbpf_load_module_btf APIs.
>> This is part of the libbpf v1.0. [1]
>>
>> [1] https://github.com/libbpf/libbpf/issues/280
> 
> Saying it's part of the libbpf 1.0 effort and giving a link to a
> GitHub issue is not really a sufficient commit message. Please expand
> on what you are doing in the patch and why.
> 

Will do.

>>
>> Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
>> ---
>>  tools/lib/bpf/btf.c      | 24 +++++++++++++++++++++++-
>>  tools/lib/bpf/btf.h      |  2 ++
>>  tools/lib/bpf/libbpf.c   |  8 ++++----
>>  tools/lib/bpf/libbpf.map |  2 ++
>>  4 files changed, 31 insertions(+), 5 deletions(-)
>>
>> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
>> index b46760b93bb4..414e1c5635ef 100644
>> --- a/tools/lib/bpf/btf.c
>> +++ b/tools/lib/bpf/btf.c
>> @@ -4021,7 +4021,7 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
>>                  */
>>                 if (d->hypot_adjust_canon)
>>                         continue;
>> -
>> +
>>                 if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
>>                         d->map[t_id] = c_id;
>>
>> @@ -4395,6 +4395,11 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
>>   * data out of it to use for target BTF.
>>   */
>>  struct btf *libbpf_find_kernel_btf(void)
>> +{
>> +       return libbpf_load_vmlinux_btf();
>> +}
>> +
>> +struct btf *libbpf_load_vmlinux_btf(void)
>>  {
>>         struct {
>>                 const char *path_fmt;
>> @@ -4440,6 +4445,23 @@ struct btf *libbpf_find_kernel_btf(void)
>>         return libbpf_err_ptr(-ESRCH);
>>  }
>>
>> +struct btf *libbpf_load_module_btf(const char *mod)
> 
> So we probably need to allow the user to pre-load and re-use vmlinux
> BTF for efficiency, especially if they have some use-case that loads a
> lot of BTFs.
> 

Should the API change to this?

struct btf *libbpf_load_module_btf(struct btf *base, const char *mod)

It seems better for the use-case you mentioned.

>> +{
>> +       char path[80];
>> +       struct btf *base;
>> +       int err;
>> +
>> +       base = libbpf_load_vmlinux_btf();
>> +       err = libbpf_get_error(base);
>> +       if (err) {
>> +               pr_warn("Error loading vmlinux BTF: %d\n", err);
>> +               return base;
> 
> libbpf_err_ptr() needs to be used here, since pr_warn() could have
> destroyed errno already.
> 

OK.

>> +       }
>> +
>> +       snprintf(path, sizeof(path), "/sys/kernel/btf/%s", mod);
>> +       return btf__parse_split(path, base);
> 
> so who's freeing base BTF in this case?
> 

Sorry, I missed that.
But if we change the signature, then we can leave this to the user.

>> +}
>> +
>>  int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
>>  {
>>         int i, n, err;
> 
> [...]
>
Andrii Nakryiko July 27, 2021, 6:18 p.m. UTC | #3
On Mon, Jul 26, 2021 at 6:12 PM Hengqi Chen <hengqi.chen@gmail.com> wrote:
>
>
>
> On 7/27/21 6:49 AM, Andrii Nakryiko wrote:
> > On Fri, Jul 23, 2021 at 10:13 PM Hengqi Chen <hengqi.chen@gmail.com> wrote:
> >>
> >> Add libbpf_load_vmlinux_btf/libbpf_load_module_btf APIs.
> >> This is part of the libbpf v1.0. [1]
> >>
> >> [1] https://github.com/libbpf/libbpf/issues/280
> >
> > Saying it's part of the libbpf 1.0 effort and giving a link to a
> > GitHub issue is not really a sufficient commit message. Please expand
> > on what you are doing in the patch and why.
> >
>
> Will do.
>
> >>
> >> Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
> >> ---
> >>  tools/lib/bpf/btf.c      | 24 +++++++++++++++++++++++-
> >>  tools/lib/bpf/btf.h      |  2 ++
> >>  tools/lib/bpf/libbpf.c   |  8 ++++----
> >>  tools/lib/bpf/libbpf.map |  2 ++
> >>  4 files changed, 31 insertions(+), 5 deletions(-)
> >>
> >> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> >> index b46760b93bb4..414e1c5635ef 100644
> >> --- a/tools/lib/bpf/btf.c
> >> +++ b/tools/lib/bpf/btf.c
> >> @@ -4021,7 +4021,7 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
> >>                  */
> >>                 if (d->hypot_adjust_canon)
> >>                         continue;
> >> -
> >> +
> >>                 if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
> >>                         d->map[t_id] = c_id;
> >>
> >> @@ -4395,6 +4395,11 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
> >>   * data out of it to use for target BTF.
> >>   */
> >>  struct btf *libbpf_find_kernel_btf(void)
> >> +{
> >> +       return libbpf_load_vmlinux_btf();
> >> +}
> >> +
> >> +struct btf *libbpf_load_vmlinux_btf(void)
> >>  {
> >>         struct {
> >>                 const char *path_fmt;
> >> @@ -4440,6 +4445,23 @@ struct btf *libbpf_find_kernel_btf(void)
> >>         return libbpf_err_ptr(-ESRCH);
> >>  }
> >>
> >> +struct btf *libbpf_load_module_btf(const char *mod)
> >
> > So we probably need to allow the user to pre-load and re-use vmlinux
> > BTF for efficiency, especially if they have some use-case that loads a
> > lot of BTFs.
> >
>
> Should the API change to this?
>
> struct btf *libbpf_load_module_btf(struct btf *base, const char *mod)
>
> It seems better for the use-case you mentioned.

Something like this, yeah. Let's put struct btf * as the last argument
and the module name as the first one, to follow the btf__parse_split()
convention of having base_btf last. And maybe let's call the base
"vmlinux_btf", as in this case it's pretty specific that it's supposed
to be the vmlinux BTF, not just any random BTF.

>
> >> +{
> >> +       char path[80];
> >> +       struct btf *base;
> >> +       int err;
> >> +
> >> +       base = libbpf_load_vmlinux_btf();
> >> +       err = libbpf_get_error(base);
> >> +       if (err) {
> >> +               pr_warn("Error loading vmlinux BTF: %d\n", err);
> >> +               return base;
> >
> > libbpf_err_ptr() needs to be used here, since pr_warn() could have
> > destroyed errno already.
> >
>
> OK.
>
> >> +       }
> >> +
> >> +       snprintf(path, sizeof(path), "/sys/kernel/btf/%s", mod);
> >> +       return btf__parse_split(path, base);
> >
> > so who's freeing base BTF in this case?
> >
>
> Sorry, I missed that.
> But if we change the signature, then we can leave this to the user.

Right, that's what I was getting at. If we make the vmlinux BTF
non-optional, then the user will have to own freeing it, just like we
do for other APIs with a base BTF.

>
> >> +}
> >> +
> >>  int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
> >>  {
> >>         int i, n, err;
> >
> > [...]
> >
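
For reference, a sketch of what this direction could look like from the
caller's side, assuming a revised prototype along the lines proposed
above, struct btf *libbpf_load_module_btf(const char *module_name,
struct btf *vmlinux_btf), with the caller owning both the vmlinux BTF
and every module BTF. The prototype and the module names are
illustrative, not a released libbpf API.

#include <bpf/btf.h>
#include <bpf/libbpf.h>

int load_many_module_btfs(void)
{
	/* hypothetical module names, just for illustration */
	static const char * const mods[] = { "nf_conntrack", "xfs" };
	struct btf *vmlinux_btf, *mod_btf;
	int i, err;

	/* load vmlinux BTF once and re-use it as the base for every module */
	vmlinux_btf = libbpf_load_vmlinux_btf();
	err = libbpf_get_error(vmlinux_btf);
	if (err)
		return err;

	for (i = 0; i < 2; i++) {
		/* proposed signature: module name first, vmlinux BTF last */
		mod_btf = libbpf_load_module_btf(mods[i], vmlinux_btf);
		err = libbpf_get_error(mod_btf);
		if (err)
			continue;
		/* ... look up module types in mod_btf ... */
		btf__free(mod_btf);	/* free each split BTF before its base */
	}

	btf__free(vmlinux_btf);		/* the caller owns the vmlinux BTF */
	return 0;
}
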

Patch

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index b46760b93bb4..414e1c5635ef 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -4021,7 +4021,7 @@  static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
 		 */
 		if (d->hypot_adjust_canon)
 			continue;
-		
+
 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
 			d->map[t_id] = c_id;
 
@@ -4395,6 +4395,11 @@  static int btf_dedup_remap_types(struct btf_dedup *d)
  * data out of it to use for target BTF.
  */
 struct btf *libbpf_find_kernel_btf(void)
+{
+	return libbpf_load_vmlinux_btf();
+}
+
+struct btf *libbpf_load_vmlinux_btf(void)
 {
 	struct {
 		const char *path_fmt;
@@ -4440,6 +4445,23 @@  struct btf *libbpf_find_kernel_btf(void)
 	return libbpf_err_ptr(-ESRCH);
 }
 
+struct btf *libbpf_load_module_btf(const char *mod)
+{
+	char path[80];
+	struct btf *base;
+	int err;
+
+	base = libbpf_load_vmlinux_btf();
+	err = libbpf_get_error(base);
+	if (err) {
+		pr_warn("Error loading vmlinux BTF: %d\n", err);
+		return base;
+	}
+
+	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", mod);
+	return btf__parse_split(path, base);
+}
+
 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
 {
 	int i, n, err;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 374e9f15de2e..d27cc2e2f220 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -90,6 +90,8 @@  LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
 LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
 
 LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
+LIBBPF_API struct btf *libbpf_load_vmlinux_btf(void);
+LIBBPF_API struct btf *libbpf_load_module_btf(const char *mod);
 
 LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
 LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a53ca29b44ab..6e4ead454c0e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2685,7 +2685,7 @@  static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
 	if (!force && !obj_needs_vmlinux_btf(obj))
 		return 0;
 
-	obj->btf_vmlinux = libbpf_find_kernel_btf();
+	obj->btf_vmlinux = libbpf_load_vmlinux_btf();
 	err = libbpf_get_error(obj->btf_vmlinux);
 	if (err) {
 		pr_warn("Error loading vmlinux BTF: %d\n", err);
@@ -7236,7 +7236,7 @@  static int bpf_object__collect_relos(struct bpf_object *obj)
 
 	for (i = 0; i < obj->nr_programs; i++) {
 		struct bpf_program *p = &obj->programs[i];
-		
+
 		if (!p->nr_reloc)
 			continue;
 
@@ -9562,7 +9562,7 @@  int libbpf_find_vmlinux_btf_id(const char *name,
 	struct btf *btf;
 	int err;
 
-	btf = libbpf_find_kernel_btf();
+	btf = libbpf_load_vmlinux_btf();
 	err = libbpf_get_error(btf);
 	if (err) {
 		pr_warn("vmlinux BTF is not found\n");
@@ -10080,7 +10080,7 @@  struct bpf_link {
 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
 {
 	int ret;
-	
+
 	ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
 	return libbpf_err_errno(ret);
 }
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index c240d488eb5e..2088bdbc0f50 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -377,4 +377,6 @@  LIBBPF_0.5.0 {
 		bpf_object__gen_loader;
 		btf_dump__dump_type_data;
 		libbpf_set_strict_mode;
+		libbpf_load_vmlinux_btf;
+		libbpf_load_module_btf;
 } LIBBPF_0.4.0;
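
Below is a short usage sketch for the two APIs exactly as posted in
this version; the module name and the type lookups are illustrative.
Note that, as posted, each libbpf_load_module_btf() call loads its own
vmlinux base BTF internally, which is what the review feedback about
pre-loading and re-use refers to.

#include <errno.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int find_kernel_types(void)
{
	struct btf *vmlinux_btf, *mod_btf;
	int task_id, conn_id, err;

	/* vmlinux BTF from /sys/kernel/btf/vmlinux, with ELF fallbacks */
	vmlinux_btf = libbpf_load_vmlinux_btf();
	err = libbpf_get_error(vmlinux_btf);
	if (err)
		return err;
	task_id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
					 BTF_KIND_STRUCT);

	/* module BTF from /sys/kernel/btf/<mod>; this call loads another
	 * vmlinux base BTF internally
	 */
	mod_btf = libbpf_load_module_btf("nf_conntrack");
	err = libbpf_get_error(mod_btf);
	if (err) {
		btf__free(vmlinux_btf);
		return err;
	}
	conn_id = btf__find_by_name_kind(mod_btf, "nf_conn", BTF_KIND_STRUCT);

	btf__free(mod_btf);
	btf__free(vmlinux_btf);
	return task_id < 0 || conn_id < 0 ? -ESRCH : 0;
}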