Message ID | 1657113391-5624-2-git-send-email-alan.maguire@oracle.com
---|---
State | Superseded
Delegated to: | BPF
Series | bpf: add a ksym BPF iterator
On Wed, Jul 6, 2022 at 6:17 AM Alan Maguire <alan.maguire@oracle.com> wrote:
>
> add a "ksym" iterator which provides access to a "struct kallsym_iter"
> for each symbol. Intent is to support more flexible symbol parsing
> as discussed in [1].
>
> [1] https://lore.kernel.org/all/YjRPZj6Z8vuLeEZo@krava/
>
> Suggested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
> Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
> Acked-by: Yonghong Song <yhs@fb.com>
> ---
>  kernel/kallsyms.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 95 insertions(+)
>

LGTM, except for unnecessary pr_warn(), see below

[...]

> +
> +BTF_ID_LIST(btf_ksym_iter_id)
> +BTF_ID(struct, kallsym_iter)
> +
> +static int __init bpf_ksym_iter_register(void)
> +{
> +	int ret;
> +
> +	ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
> +	ret = bpf_iter_reg_target(&ksym_iter_reg_info);
> +	if (ret)
> +		pr_warn("Warning: could not register bpf ksym iterator: %d\n", ret);

we don't emit such warnings for some other iterators I checked (map,
link, etc). Do we really need this? It's very unlikely to happen
anyways.

> +	return ret;
> +}
> +
> +late_initcall(bpf_ksym_iter_register);
> +
> +#endif /* CONFIG_BPF_SYSCALL */
> +
>  static inline int kallsyms_for_perf(void)
>  {
>  #ifdef CONFIG_PERF_EVENTS
> --
> 1.8.3.1
>
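For illustration, a minimal sketch of what the registration could look like with the pr_warn() dropped, matching how other iterator targets register silently. This is just a sketch of the suggested change, not the posted patch or a respin:

	static int __init bpf_ksym_iter_register(void)
	{
		/* fill in the BTF id for the ctx argument, then register the
		 * "ksym" target; a failure is simply propagated with no
		 * warning printed, as for the map/link iterators.
		 */
		ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
		return bpf_iter_reg_target(&ksym_iter_reg_info);
	}

	late_initcall(bpf_ksym_iter_register);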
Hi Alan,

On Wed, Jul 6, 2022 at 6:17 AM Alan Maguire <alan.maguire@oracle.com> wrote:
>
> add a "ksym" iterator which provides access to a "struct kallsym_iter"
> for each symbol. Intent is to support more flexible symbol parsing
> as discussed in [1].
>
> [1] https://lore.kernel.org/all/YjRPZj6Z8vuLeEZo@krava/
>
> Suggested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
> Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
> Acked-by: Yonghong Song <yhs@fb.com>
> ---
>  kernel/kallsyms.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 95 insertions(+)
>

[...]

> +
> +static struct bpf_iter_reg ksym_iter_reg_info = {
> +	.target = "ksym",
> +	.ctx_arg_info_size = 1,
> +	.ctx_arg_info = {
> +		{ offsetof(struct bpf_iter__ksym, ksym),
> +		  PTR_TO_BTF_ID_OR_NULL },
> +	},
> +	.seq_info = &ksym_iter_seq_info,
> +};
> +

Can we add allow resched here?

	.feature = BPF_ITER_RESCHED,

I think this will improve the responsiveness of the kernel when
iterating ksyms.

Thanks,
Hao

> +BTF_ID_LIST(btf_ksym_iter_id)
> +BTF_ID(struct, kallsym_iter)
> +
> +static int __init bpf_ksym_iter_register(void)
> +{
> +	int ret;
> +
> +	ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
> +	ret = bpf_iter_reg_target(&ksym_iter_reg_info);
> +	if (ret)
> +		pr_warn("Warning: could not register bpf ksym iterator: %d\n", ret);
> +	return ret;
> +}
> +
> +late_initcall(bpf_ksym_iter_register);
> +
> +#endif /* CONFIG_BPF_SYSCALL */
> +
>  static inline int kallsyms_for_perf(void)
>  {
>  #ifdef CONFIG_PERF_EVENTS
> --
> 1.8.3.1
>
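For illustration, a sketch of the registration struct with the suggested flag added (not the posted patch). BPF_ITER_RESCHED lets the iterator infrastructure reschedule between batches of elements, so walking a large symbol table does not hog the CPU:

	static struct bpf_iter_reg ksym_iter_reg_info = {
		.target = "ksym",
		/* allow rescheduling between batches of ksyms, as suggested */
		.feature = BPF_ITER_RESCHED,
		.ctx_arg_info_size = 1,
		.ctx_arg_info = {
			{ offsetof(struct bpf_iter__ksym, ksym),
			  PTR_TO_BTF_ID_OR_NULL },
		},
		.seq_info = &ksym_iter_seq_info,
	};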
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index fbdf8d3..5748020 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/bsearch.h>
+#include <linux/btf_ids.h>

 /*
  * These will be re-linked against their real values
@@ -799,6 +800,100 @@ static int s_show(struct seq_file *m, void *p)
 	.show = s_show
 };

+#ifdef CONFIG_BPF_SYSCALL
+
+struct bpf_iter__ksym {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct kallsym_iter *, ksym);
+};
+
+static int ksym_prog_seq_show(struct seq_file *m, bool in_stop)
+{
+	struct bpf_iter__ksym ctx;
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+
+	meta.seq = m;
+	prog = bpf_iter_get_info(&meta, in_stop);
+	if (!prog)
+		return 0;
+
+	ctx.meta = &meta;
+	ctx.ksym = m ? m->private : NULL;
+	return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p)
+{
+	return ksym_prog_seq_show(m, false);
+}
+
+static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p)
+{
+	if (!p)
+		(void) ksym_prog_seq_show(m, true);
+	else
+		s_stop(m, p);
+}
+
+static const struct seq_operations bpf_iter_ksym_ops = {
+	.start = s_start,
+	.next = s_next,
+	.stop = bpf_iter_ksym_seq_stop,
+	.show = bpf_iter_ksym_seq_show,
+};
+
+static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux)
+{
+	struct kallsym_iter *iter = priv_data;
+
+	reset_iter(iter, 0);
+
+	/* cache here as in kallsyms_open() case; use current process
+	 * credentials to tell BPF iterators if values should be shown.
+	 */
+	iter->show_value = kallsyms_show_value(current_cred());
+
+	return 0;
+}
+
+DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym)
+
+static const struct bpf_iter_seq_info ksym_iter_seq_info = {
+	.seq_ops = &bpf_iter_ksym_ops,
+	.init_seq_private = bpf_iter_ksym_init,
+	.fini_seq_private = NULL,
+	.seq_priv_size = sizeof(struct kallsym_iter),
+};
+
+static struct bpf_iter_reg ksym_iter_reg_info = {
+	.target = "ksym",
+	.ctx_arg_info_size = 1,
+	.ctx_arg_info = {
+		{ offsetof(struct bpf_iter__ksym, ksym),
+		  PTR_TO_BTF_ID_OR_NULL },
+	},
+	.seq_info = &ksym_iter_seq_info,
+};
+
+BTF_ID_LIST(btf_ksym_iter_id)
+BTF_ID(struct, kallsym_iter)
+
+static int __init bpf_ksym_iter_register(void)
+{
+	int ret;
+
+	ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
+	ret = bpf_iter_reg_target(&ksym_iter_reg_info);
+	if (ret)
+		pr_warn("Warning: could not register bpf ksym iterator: %d\n", ret);
+	return ret;
+}
+
+late_initcall(bpf_ksym_iter_register);
+
+#endif /* CONFIG_BPF_SYSCALL */
+
 static inline int kallsyms_for_perf(void)
 {
 #ifdef CONFIG_PERF_EVENTS
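For context, a rough sketch of a BPF program that could consume the new "ksym" iterator target once the series lands. The program and function names are illustrative, not part of the patch; it assumes struct bpf_iter__ksym and struct kallsym_iter are visible via vmlinux.h and that libbpf's bpf_helpers.h/bpf_tracing.h are used:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	SEC("iter/ksym")
	int dump_ksym(struct bpf_iter__ksym *ctx)
	{
		struct seq_file *seq = ctx->meta->seq;
		struct kallsym_iter *iter = ctx->ksym;
		__u64 value;

		/* ksym is PTR_TO_BTF_ID_OR_NULL, so the verifier requires a
		 * NULL check before dereferencing it.
		 */
		if (!iter)
			return 0;

		/* honour show_value, which the kernel derived from the
		 * opener's credentials in bpf_iter_ksym_init()
		 */
		value = iter->show_value ? iter->value : 0;

		BPF_SEQ_PRINTF(seq, "0x%llx %c %s\n", value, iter->type,
			       iter->name);
		return 0;
	}

Pinning such a program to bpffs would then give a /proc/kallsyms-like view whose per-symbol formatting and filtering is under BPF control, which is the flexibility discussed in [1].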