Message ID | 20210729233645.4869-2-kuniyu@amazon.co.jp (mailing list archive)
---|---
State | Superseded
Delegated to: | BPF
Series | BPF iterator for UNIX domain socket.
Context | Check | Description
---|---|---
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | success | Link |
netdev/tree_selection | success | Clearly marked for bpf-next |
netdev/subject_prefix | success | Link |
netdev/cc_maintainers | warning | 5 maintainers not CCed: viro@zeniv.linux.org.uk jolsa@kernel.org cong.wang@bytedance.com lmb@cloudflare.com christian.brauner@ubuntu.com |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | fail | Errors and warnings before: 126 this patch: 135 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | warning | WARNING: externs should be avoided in .c files |
netdev/build_allmodconfig_warn | fail | Errors and warnings before: 126 this patch: 135 |
netdev/header_inline | success | Link |
Hi Kuniyuki,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on bpf-next/master]

url:    https://github.com/0day-ci/linux/commits/Kuniyuki-Iwashima/BPF-iterator-for-UNIX-domain-socket/20210730-073919
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
config: powerpc-randconfig-r031-20210730 (attached as .config)
compiler: powerpc-linux-gcc (GCC) 10.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/4e824a068d6173a101001fd6a45acd79e938e420
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Kuniyuki-Iwashima/BPF-iterator-for-UNIX-domain-socket/20210730-073919
        git checkout 4e824a068d6173a101001fd6a45acd79e938e420
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-10.3.0 make.cross ARCH=powerpc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>, old ones prefixed by <<):

>> ERROR: modpost: "bpf_iter_fini_seq_net" [net/unix/unix.ko] undefined!
>> ERROR: modpost: "bpf_iter_run_prog" [net/unix/unix.ko] undefined!
>> ERROR: modpost: "bpf_iter_get_info" [net/unix/unix.ko] undefined!
>> ERROR: modpost: "bpf_iter_reg_target" [net/unix/unix.ko] undefined!
>> ERROR: modpost: "btf_sock_ids" [net/unix/unix.ko] undefined!
>> ERROR: modpost: "bpf_iter_init_seq_net" [net/unix/unix.ko] undefined!

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
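[Editor's note: these modpost failures appear when CONFIG_UNIX=m, because the bpf_iter_* helpers and btf_sock_ids are built into the core kernel and are not exported to modules, so unix.ko cannot link against them. A minimal sketch of one way to avoid this — an assumption on our part, not a change posted in this thread — is to compile the iterator support only when AF_UNIX is built in:]

```c
/* Sketch (not from this thread): guard the iterator registration so it
 * is only compiled when AF_UNIX is built in, since bpf_iter_reg_target()
 * and friends are unavailable to modules. */
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif
```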
On 7/29/21 4:36 PM, Kuniyuki Iwashima wrote:
> This patch implements the BPF iterator for the UNIX domain socket.
>
> Currently, the batch optimization introduced for the TCP iterator in the
> commit 04c7820b776f ("bpf: tcp: Bpf iter batching and lock_sock") is not
> applied. It will require replacing the big lock for the hash table with
> small locks for each hash list not to block other processes.

Thanks for the contribution. The patch looks okay except
missing seq_ops->stop implementation, see below for more explanation.

>
> Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
> ---
>  include/linux/btf_ids.h |  3 +-
>  net/unix/af_unix.c      | 78 +++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 80 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
> index 57890b357f85..bed4b9964581 100644
> --- a/include/linux/btf_ids.h
> +++ b/include/linux/btf_ids.h
> @@ -172,7 +172,8 @@ extern struct btf_id_set name;
>  	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock)	\
>  	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock)		\
>  	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock)		\
> -	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
> +	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)		\
> +	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)
>
>  enum {
>  #define BTF_SOCK_TYPE(name, str) name,
> diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
> index 89927678c0dc..d45ad87e3a49 100644
> --- a/net/unix/af_unix.c
> +++ b/net/unix/af_unix.c
> @@ -113,6 +113,7 @@
>  #include <linux/security.h>
>  #include <linux/freezer.h>
>  #include <linux/file.h>
> +#include <linux/btf_ids.h>
>
>  #include "scm.h"
>
> @@ -2935,6 +2936,49 @@ static const struct seq_operations unix_seq_ops = {
>  	.stop   = unix_seq_stop,
>  	.show   = unix_seq_show,
>  };
> +
> +#ifdef CONFIG_BPF_SYSCALL
> +struct bpf_iter__unix {
> +	__bpf_md_ptr(struct bpf_iter_meta *, meta);
> +	__bpf_md_ptr(struct unix_sock *, unix_sk);
> +	uid_t uid __aligned(8);
> +};
> +
> +static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
> +			      struct unix_sock *unix_sk, uid_t uid)
> +{
> +	struct bpf_iter__unix ctx;
> +
> +	meta->seq_num--;  /* skip SEQ_START_TOKEN */
> +	ctx.meta = meta;
> +	ctx.unix_sk = unix_sk;
> +	ctx.uid = uid;
> +	return bpf_iter_run_prog(prog, &ctx);
> +}
> +
> +static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
> +{
> +	struct bpf_iter_meta meta;
> +	struct bpf_prog *prog;
> +	struct sock *sk = v;
> +	uid_t uid;
> +
> +	if (v == SEQ_START_TOKEN)
> +		return 0;
> +
> +	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
> +	meta.seq = seq;
> +	prog = bpf_iter_get_info(&meta, false);
> +	return unix_prog_seq_show(prog, &meta, v, uid);
> +}
> +
> +static const struct seq_operations bpf_iter_unix_seq_ops = {
> +	.start	= unix_seq_start,
> +	.next	= unix_seq_next,
> +	.stop	= unix_seq_stop,

Although it is not required for /proc/net/unix, we should still
implement bpf_iter version of seq_ops->stop here. The main purpose
of bpf_iter specific seq_ops->stop is to call bpf program one
more time after ALL elements have been traversed. Such
functionality is implemented in all other bpf_iter variants.

> +	.show	= bpf_iter_unix_seq_show,
> +};
> +#endif
>  #endif
>
>  static const struct net_proto_family unix_family_ops = {
> @@ -2975,6 +3019,35 @@ static struct pernet_operations unix_net_ops = {
>  	.exit = unix_net_exit,
>  };
[...]
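[Editor's note: for reference, a minimal sketch of the bpf_iter-specific stop callback the review asks for, modeled on the existing TCP/UDP iterator variants — an illustration, not code from this patch. It runs the program one final time with a NULL object before delegating to the regular unix_seq_stop():]

```c
/* Sketch of a bpf_iter-aware stop callback, modeled on the TCP/UDP
 * iterators; not part of the posted patch. Passing true as the second
 * argument of bpf_iter_get_info() requests the program for the final
 * invocation, and v == NULL means the bpf program will see
 * ctx->unix_sk == NULL, its signal that iteration is complete. */
static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	unix_seq_stop(seq, v);
}
```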
From: Yonghong Song <yhs@fb.com>
Date: Thu, 29 Jul 2021 23:24:41 -0700
> On 7/29/21 4:36 PM, Kuniyuki Iwashima wrote:
> > This patch implements the BPF iterator for the UNIX domain socket.
> >
> > Currently, the batch optimization introduced for the TCP iterator in the
> > commit 04c7820b776f ("bpf: tcp: Bpf iter batching and lock_sock") is not
> > applied. It will require replacing the big lock for the hash table with
> > small locks for each hash list not to block other processes.
>
> Thanks for the contribution. The patch looks okay except
> missing seq_ops->stop implementation, see below for more explanation.
>
[...]
> > +static const struct seq_operations bpf_iter_unix_seq_ops = {
> > +	.start	= unix_seq_start,
> > +	.next	= unix_seq_next,
> > +	.stop	= unix_seq_stop,
>
> Although it is not required for /proc/net/unix, we should still
> implement bpf_iter version of seq_ops->stop here. The main purpose
> of bpf_iter specific seq_ops->stop is to call bpf program one
> more time after ALL elements have been traversed. Such
> functionality is implemented in all other bpf_iter variants.

Thanks for your review!
I will implement the extra call in the next spin.

Just out of curiosity, is there a specific use case for the last call?
On 7/29/21 11:53 PM, Kuniyuki Iwashima wrote:
> From: Yonghong Song <yhs@fb.com>
> Date: Thu, 29 Jul 2021 23:24:41 -0700
>> On 7/29/21 4:36 PM, Kuniyuki Iwashima wrote:
>>> This patch implements the BPF iterator for the UNIX domain socket.
>>>
>>> Currently, the batch optimization introduced for the TCP iterator in the
>>> commit 04c7820b776f ("bpf: tcp: Bpf iter batching and lock_sock") is not
>>> applied. It will require replacing the big lock for the hash table with
>>> small locks for each hash list not to block other processes.
>>
>> Thanks for the contribution. The patch looks okay except
>> missing seq_ops->stop implementation, see below for more explanation.
>>
[...]
>>> +static const struct seq_operations bpf_iter_unix_seq_ops = {
>>> +	.start	= unix_seq_start,
>>> +	.next	= unix_seq_next,
>>> +	.stop	= unix_seq_stop,
>>
>> Although it is not required for /proc/net/unix, we should still
>> implement bpf_iter version of seq_ops->stop here. The main purpose
>> of bpf_iter specific seq_ops->stop is to call bpf program one
>> more time after ALL elements have been traversed. Such
>> functionality is implemented in all other bpf_iter variants.
>
> Thanks for your review!
> I will implement the extra call in the next spin.
>
> Just out of curiosity, is there a specific use case for the last call?

We don't have use cases for dumps similar to /proc/net/... etc.
The original thinking is to permit in-kernel aggregation and the
seq_ops->stop() bpf program will have an indication as the last
bpf program invocation for the iterator at which point bpf program
may wrap up aggregation and send/signal the result to user space.
I am not sure whether people already used this feature or not, or
people may have different way to do that (e.g., from user space
directly checking map value if read() length is 0). But
bpf seq_ops->stop() provides an in-kernel way for bpf program
to respond to the end of iterating.
From: Yonghong Song <yhs@fb.com>
Date: Fri, 30 Jul 2021 00:09:08 -0700
> On 7/29/21 11:53 PM, Kuniyuki Iwashima wrote:
> > From: Yonghong Song <yhs@fb.com>
> > Date: Thu, 29 Jul 2021 23:24:41 -0700
[...]
> >> Although it is not required for /proc/net/unix, we should still
> >> implement bpf_iter version of seq_ops->stop here. The main purpose
> >> of bpf_iter specific seq_ops->stop is to call bpf program one
> >> more time after ALL elements have been traversed. Such
> >> functionality is implemented in all other bpf_iter variants.
> >
> > Thanks for your review!
> > I will implement the extra call in the next spin.
> >
> > Just out of curiosity, is there a specific use case for the last call?
>
> We don't have use cases for dumps similar to /proc/net/... etc.
> The original thinking is to permit in-kernel aggregation and the
> seq_ops->stop() bpf program will have an indication as the last
> bpf program invocation for the iterator at which point bpf program
> may wrap up aggregation and send/signal the result to user space.
> I am not sure whether people already used this feature or not, or
> people may have different way to do that (e.g., from user space
> directly checking map value if read() length is 0). But
> bpf seq_ops->stop() provides an in-kernel way for bpf program
> to respond to the end of iterating.

Aggregation, that makes sense.
Thank you!
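[Editor's note: to make the aggregation idea concrete, here is a hedged sketch of an iter/unix BPF program. It assumes the bpf_iter__unix ctx layout from this patch is visible (e.g. via a vmlinux.h generated from a patched kernel) and that, once the stop callback lands, the final invocation sees ctx->unix_sk == NULL:]

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Running total for this program load; also readable from user space
 * through the global-data map. */
__u64 nr_unix_socks = 0;

SEC("iter/unix")
int count_unix(struct bpf_iter__unix *ctx)
{
	struct seq_file *seq = ctx->meta->seq;

	if (!ctx->unix_sk) {
		/* Final invocation from seq_ops->stop(): every socket has
		 * been visited, so wrap up and emit the aggregate once. */
		BPF_SEQ_PRINTF(seq, "total: %llu\n", nr_unix_socks);
		return 0;
	}

	nr_unix_socks++;
	return 0;
}
```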
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 57890b357f85..bed4b9964581 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -172,7 +172,8 @@ extern struct btf_id_set name;
 	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock)	\
 	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock)		\
 	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock)		\
-	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)		\
+	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)
 
 enum {
 #define BTF_SOCK_TYPE(name, str) name,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 89927678c0dc..d45ad87e3a49 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -113,6 +113,7 @@
 #include <linux/security.h>
 #include <linux/freezer.h>
 #include <linux/file.h>
+#include <linux/btf_ids.h>
 
 #include "scm.h"
 
@@ -2935,6 +2936,49 @@ static const struct seq_operations unix_seq_ops = {
 	.stop   = unix_seq_stop,
 	.show   = unix_seq_show,
 };
+
+#ifdef CONFIG_BPF_SYSCALL
+struct bpf_iter__unix {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct unix_sock *, unix_sk);
+	uid_t uid __aligned(8);
+};
+
+static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
+			      struct unix_sock *unix_sk, uid_t uid)
+{
+	struct bpf_iter__unix ctx;
+
+	meta->seq_num--;  /* skip SEQ_START_TOKEN */
+	ctx.meta = meta;
+	ctx.unix_sk = unix_sk;
+	ctx.uid = uid;
+	return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+	struct sock *sk = v;
+	uid_t uid;
+
+	if (v == SEQ_START_TOKEN)
+		return 0;
+
+	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, false);
+	return unix_prog_seq_show(prog, &meta, v, uid);
+}
+
+static const struct seq_operations bpf_iter_unix_seq_ops = {
+	.start	= unix_seq_start,
+	.next	= unix_seq_next,
+	.stop	= unix_seq_stop,
+	.show	= bpf_iter_unix_seq_show,
+};
+#endif
 #endif
 
 static const struct net_proto_family unix_family_ops = {
@@ -2975,6 +3019,35 @@ static struct pernet_operations unix_net_ops = {
 	.exit = unix_net_exit,
 };
 
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
+		     struct unix_sock *unix_sk, uid_t uid)
+
+static const struct bpf_iter_seq_info unix_seq_info = {
+	.seq_ops		= &bpf_iter_unix_seq_ops,
+	.init_seq_private	= bpf_iter_init_seq_net,
+	.fini_seq_private	= bpf_iter_fini_seq_net,
+	.seq_priv_size		= sizeof(struct seq_net_private),
+};
+
+static struct bpf_iter_reg unix_reg_info = {
+	.target			= "unix",
+	.ctx_arg_info_size	= 1,
+	.ctx_arg_info		= {
+		{ offsetof(struct bpf_iter__unix, unix_sk),
+		  PTR_TO_BTF_ID_OR_NULL },
+	},
+	.seq_info		= &unix_seq_info,
+};
+
+static void __init bpf_iter_register(void)
+{
+	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
+	if (bpf_iter_reg_target(&unix_reg_info))
+		pr_warn("Warning: could not register bpf iterator unix\n");
+}
+#endif
+
 static int __init af_unix_init(void)
 {
 	int rc = -1;
@@ -2990,6 +3063,11 @@ static int __init af_unix_init(void)
 	sock_register(&unix_family_ops);
 	register_pernet_subsys(&unix_net_ops);
 	unix_bpf_build_proto();
+
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+	bpf_iter_register();
+#endif
+
 out:
 	return rc;
 }
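[Editor's note: once the "unix" target is registered, user space can drive the iterator through the standard bpf_iter interface; each read() on the iterator fd runs the seq_file machinery, which invokes the attached program per socket. A hedged libbpf sketch follows — bpf_program__attach_iter() and bpf_iter_create() are real libbpf APIs, but the function name and error handling here are illustrative:]

```c
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

/* Attach an already-loaded iter/unix program and stream its output.
 * Illustrative sketch; error handling is minimal for brevity. */
int dump_unix_iter(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[4096];
	ssize_t len;
	int iter_fd;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -1;
	}

	/* Each read() triggers seq_start/next/show/stop in the kernel. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, len, stdout);

	close(iter_fd);
	bpf_link__destroy(link);
	return 0;
}
```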
This patch implements the BPF iterator for the UNIX domain socket.

Currently, the batch optimization introduced for the TCP iterator in the
commit 04c7820b776f ("bpf: tcp: Bpf iter batching and lock_sock") is not
applied. It will require replacing the big lock for the hash table with
small locks for each hash list not to block other processes.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
---
 include/linux/btf_ids.h |  3 +-
 net/unix/af_unix.c      | 78 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+), 1 deletion(-)