
[RFC/PATCH,1/3] btf_encoder: Generate also .init functions

Message ID 20201112150506.705430-2-jolsa@kernel.org (mailing list archive)
State Superseded
Series btf_encoder: Fix functions BTF data generation

Checks

netdev/tree_selection: success (Not a local patch)

Commit Message

Jiri Olsa Nov. 12, 2020, 3:05 p.m. UTC
Currently we skip functions under .init* sections. Remove the .init*
section check, so BTF now also contains functions from .init* sections.

Andrii's explanation from email:

> ...                  I think we should just drop the __init check and
> include all the __init functions into BTF. There could be cases where
> we'd need to attach BPF programs to __init functions (e.g., bpf_lsm
> security cases), so having BTFs for those FUNCs is necessary as well.
> Ftrace currently disallows that, but it's only because no user-space
> application has a way to attach probes early enough. This might change
> in the future, so there is no need to invent special mechanisms now
> for bpf_iter function preservation. Let's just include all __init
> functions in BTF.

That adds over 2000 functions with my .config:

   $ bpftool btf dump file ./vmlinux | grep 'FUNC ' | wc -l
   41505
   $ bpftool btf dump file /sys/kernel/btf/vmlinux | grep 'FUNC ' | wc -l
   39256

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 btf_encoder.c | 43 ++-----------------------------------------
 1 file changed, 2 insertions(+), 41 deletions(-)
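
For readers skimming the diff below, the filtering that remains after this
change boils down to the loop sketched here. This is only a condensed
reconstruction from the hunks below, not the verbatim pahole source; the
body of the bsearch() branch is elided.

    /*
     * Sketch of filter_functions() after the patch: the is_init() /
     * is_bpf_init() range checks are gone, so the only remaining
     * filter is the ftrace mcount address check.
     */
    for (i = 0; i < functions_cnt; i++) {
            struct elf_function *func = &functions[i];

            /* Keep the function only if its address is in the ftrace table. */
            if (bsearch(&func->addr, addrs, count, sizeof(addrs[0]), addrs_cmp)) {
                    /* ... mark func so a BTF FUNC entry is emitted for it ... */
            }
    }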

Comments

Andrii Nakryiko Nov. 12, 2020, 7:37 p.m. UTC | #1
On Thu, Nov 12, 2020 at 7:05 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Currently we skip functions under .init* sections. Remove the .init*
> section check, so BTF now also contains functions from .init* sections.
>
> Andrii's explanation from email:
>
> > ...                  I think we should just drop the __init check and
> > include all the __init functions into BTF. There could be cases where
> > we'd need to attach BPF programs to __init functions (e.g., bpf_lsm
> > security cases), so having BTFs for those FUNCs is necessary as well.
> > Ftrace currently disallows that, but it's only because no user-space
> > application has a way to attach probes early enough. This might change
> > in the future, so there is no need to invent special mechanisms now
> > for bpf_iter function preservation. Let's just include all __init
> > functions in BTF.
>
> That adds over 2000 functions with my .config:
>
>    $ bpftool btf dump file ./vmlinux | grep 'FUNC ' | wc -l
>    41505
>    $ bpftool btf dump file /sys/kernel/btf/vmlinux | grep 'FUNC ' | wc -l
>    39256
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---

Looks good.

Acked-by: Andrii Nakryiko <andrii@kernel.org>

>  btf_encoder.c | 43 ++-----------------------------------------
>  1 file changed, 2 insertions(+), 41 deletions(-)
>

[...]
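
Before the full diff, here is the shape of the symbol-layout tracking after
the patch, again a condensed sketch derived from the hunks below rather than
the exact pahole source: only the ftrace mcount table boundaries are tracked,
and the __init_begin/__init_end and __init_bpf_preserve_type_* symbols are no
longer collected.

    struct funcs_layout {
            unsigned long mcount_start;
            unsigned long mcount_stop;
            unsigned long mcount_sec_idx;
    };

    static int has_all_symbols(struct funcs_layout *fl)
    {
            /* Only the mcount boundaries are required now. */
            return fl->mcount_start && fl->mcount_stop;
    }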

Patch

diff --git a/btf_encoder.c b/btf_encoder.c
index 9b93e9963727..d531651b1e9e 100644
--- a/btf_encoder.c
+++ b/btf_encoder.c
@@ -29,10 +29,6 @@ 
 struct funcs_layout {
 	unsigned long mcount_start;
 	unsigned long mcount_stop;
-	unsigned long init_begin;
-	unsigned long init_end;
-	unsigned long init_bpf_begin;
-	unsigned long init_bpf_end;
 	unsigned long mcount_sec_idx;
 };
 
@@ -104,16 +100,6 @@  static int addrs_cmp(const void *_a, const void *_b)
 	return *a < *b ? -1 : 1;
 }
 
-static bool is_init(struct funcs_layout *fl, unsigned long addr)
-{
-	return addr >= fl->init_begin && addr < fl->init_end;
-}
-
-static bool is_bpf_init(struct funcs_layout *fl, unsigned long addr)
-{
-	return addr >= fl->init_bpf_begin && addr < fl->init_bpf_end;
-}
-
 static int filter_functions(struct btf_elf *btfe, struct funcs_layout *fl)
 {
 	unsigned long *addrs, count, offset, i;
@@ -155,18 +141,11 @@  static int filter_functions(struct btf_elf *btfe, struct funcs_layout *fl)
 
 	/*
 	 * Let's got through all collected functions and filter
-	 * out those that are not in ftrace and init code.
+	 * out those that are not in ftrace.
 	 */
 	for (i = 0; i < functions_cnt; i++) {
 		struct elf_function *func = &functions[i];
 
-		/*
-		 * Do not enable .init section functions,
-		 * but keep .init.bpf.preserve_type functions.
-		 */
-		if (is_init(fl, func->addr) && !is_bpf_init(fl, func->addr))
-			continue;
-
 		/* Make sure function is within ftrace addresses. */
 		if (bsearch(&func->addr, addrs, count, sizeof(addrs[0]), addrs_cmp)) {
 			/*
@@ -493,29 +472,11 @@  static void collect_symbol(GElf_Sym *sym, struct funcs_layout *fl)
 	if (!fl->mcount_stop &&
 	    !strcmp("__stop_mcount_loc", elf_sym__name(sym, btfe->symtab)))
 		fl->mcount_stop = sym->st_value;
-
-	if (!fl->init_begin &&
-	    !strcmp("__init_begin", elf_sym__name(sym, btfe->symtab)))
-		fl->init_begin = sym->st_value;
-
-	if (!fl->init_end &&
-	    !strcmp("__init_end", elf_sym__name(sym, btfe->symtab)))
-		fl->init_end = sym->st_value;
-
-	if (!fl->init_bpf_begin &&
-	    !strcmp("__init_bpf_preserve_type_begin", elf_sym__name(sym, btfe->symtab)))
-		fl->init_bpf_begin = sym->st_value;
-
-	if (!fl->init_bpf_end &&
-	    !strcmp("__init_bpf_preserve_type_end", elf_sym__name(sym, btfe->symtab)))
-		fl->init_bpf_end = sym->st_value;
 }
 
 static int has_all_symbols(struct funcs_layout *fl)
 {
-	return fl->mcount_start && fl->mcount_stop &&
-	       fl->init_begin && fl->init_end &&
-	       fl->init_bpf_begin && fl->init_bpf_end;
+	return fl->mcount_start && fl->mcount_stop;
 }
 
 static int collect_symbols(struct btf_elf *btfe, bool collect_percpu_vars)