@@ -42,13 +42,13 @@ static void test_btf_id_or_null(void)
}
}
-static void do_dummy_read(struct bpf_program *prog)
+static void do_dummy_read(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
struct bpf_link *link;
char buf[16] = {};
int iter_fd, len;
- link = bpf_program__attach_iter(prog, NULL);
+ link = bpf_program__attach_iter(prog, opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
@@ -91,7 +91,7 @@ static void test_ipv6_route(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_ipv6_route);
+ do_dummy_read(skel->progs.dump_ipv6_route, NULL);
bpf_iter_ipv6_route__destroy(skel);
}
@@ -104,7 +104,7 @@ static void test_netlink(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_netlink);
+ do_dummy_read(skel->progs.dump_netlink, NULL);
bpf_iter_netlink__destroy(skel);
}
@@ -117,20 +117,70 @@ static void test_bpf_map(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_bpf_map);
+ do_dummy_read(skel->progs.dump_bpf_map, NULL);
bpf_iter_bpf_map__destroy(skel);
}
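+/* Verify that the tid filter supplied at attach time is reported back
+ * through bpf_obj_get_info_by_fd() on the iterator link.
+ */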
+static void check_bpf_link_info(const struct bpf_program *prog)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link_info info = {};
+ __u32 info_len;
+ struct bpf_link *link;
+ int err;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ linfo.task.type = BPF_TASK_ITER_TID;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(prog, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ return;
+
+ info_len = sizeof(info);
+ err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
+ if (ASSERT_OK(err, "bpf_obj_get_info_by_fd")) {
+ ASSERT_EQ(info.iter.task.type, BPF_TASK_ITER_TID, "check_task_type");
+ ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
+ }
+
+ bpf_link__destroy(link);
+}
+
static void test_task(void)
{
struct bpf_iter_task *skel;
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
skel = bpf_iter_task__open_and_load();
if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_task);
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ linfo.task.type = BPF_TASK_ITER_TID;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ skel->bss->tid = getpid();
+
+ do_dummy_read(skel->progs.dump_task, &opts);
+
+ ASSERT_EQ(skel->bss->num_unknown_tid, 0, "check_num_unknown_tid");
+ ASSERT_EQ(skel->bss->num_known_tid, 1, "check_num_known_tid");
+
+ skel->bss->num_unknown_tid = 0;
+ skel->bss->num_known_tid = 0;
+
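+ /* Second pass without a filter: the iterator now visits every task. */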
+ do_dummy_read(skel->progs.dump_task, NULL);
+
+ ASSERT_GT(skel->bss->num_unknown_tid, 0, "check_num_unknown_tid");
+ ASSERT_EQ(skel->bss->num_known_tid, 1, "check_num_known_tid");
+
+ check_bpf_link_info(skel->progs.dump_task);
bpf_iter_task__destroy(skel);
}
@@ -143,7 +193,7 @@ static void test_task_sleepable(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_task_sleepable);
+ do_dummy_read(skel->progs.dump_task_sleepable, NULL);
ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
"num_expected_failure_copy_from_user_task");
@@ -161,8 +211,8 @@ static void test_task_stack(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_task_stack);
- do_dummy_read(skel->progs.get_task_user_stacks);
+ do_dummy_read(skel->progs.dump_task_stack, NULL);
+ do_dummy_read(skel->progs.get_task_user_stacks, NULL);
bpf_iter_task_stack__destroy(skel);
}
@@ -174,7 +224,9 @@ static void *do_nothing(void *arg)
static void test_task_file(void)
{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_task_file *skel;
+ union bpf_iter_link_info linfo;
pthread_t thread_id;
void *ret;
@@ -188,15 +240,31 @@ static void test_task_file(void)
"pthread_create"))
goto done;
- do_dummy_read(skel->progs.dump_task_file);
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ linfo.task.type = BPF_TASK_ITER_TID;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ do_dummy_read(skel->progs.dump_task_file, &opts);
if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
"pthread_join"))
goto done;
ASSERT_EQ(skel->bss->count, 0, "check_count");
+ ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+ skel->bss->count = 0;
+ skel->bss->unique_tgid_count = 0;
+
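+ /* Second pass without a filter: walk the files of every task. */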
+ do_dummy_read(skel->progs.dump_task_file, NULL);
-done:
+ ASSERT_GE(skel->bss->count, 0, "check_count");
+ ASSERT_GE(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+ check_bpf_link_info(skel->progs.dump_task_file);
+
+done:
bpf_iter_task_file__destroy(skel);
}
@@ -274,7 +342,7 @@ static void test_tcp4(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_tcp4);
+ do_dummy_read(skel->progs.dump_tcp4, NULL);
bpf_iter_tcp4__destroy(skel);
}
@@ -287,7 +355,7 @@ static void test_tcp6(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_tcp6);
+ do_dummy_read(skel->progs.dump_tcp6, NULL);
bpf_iter_tcp6__destroy(skel);
}
@@ -300,7 +368,7 @@ static void test_udp4(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_udp4);
+ do_dummy_read(skel->progs.dump_udp4, NULL);
bpf_iter_udp4__destroy(skel);
}
@@ -313,7 +381,7 @@ static void test_udp6(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_udp6);
+ do_dummy_read(skel->progs.dump_udp6, NULL);
bpf_iter_udp6__destroy(skel);
}
@@ -326,7 +394,7 @@ static void test_unix(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_unix);
+ do_dummy_read(skel->progs.dump_unix, NULL);
bpf_iter_unix__destroy(skel);
}
@@ -988,7 +1056,7 @@ static void test_bpf_sk_storage_get(void)
if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto close_socket;
- do_dummy_read(skel->progs.fill_socket_owner);
+ do_dummy_read(skel->progs.fill_socket_owner, NULL);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
@@ -996,7 +1064,7 @@ static void test_bpf_sk_storage_get(void)
getpid(), val, err))
goto close_socket;
- do_dummy_read(skel->progs.negate_socket_local_storage);
+ do_dummy_read(skel->progs.negate_socket_local_storage, NULL);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
@@ -1116,7 +1184,7 @@ static void test_link_iter(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_bpf_link);
+ do_dummy_read(skel->progs.dump_bpf_link, NULL);
bpf_iter_bpf_link__destroy(skel);
}
@@ -1129,7 +1197,7 @@ static void test_ksym_iter(void)
if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
return;
- do_dummy_read(skel->progs.dump_ksym);
+ do_dummy_read(skel->progs.dump_ksym, NULL);
bpf_iter_ksym__destroy(skel);
}
@@ -1154,7 +1222,7 @@ static void str_strip_first_line(char *str)
*dst = '\0';
}
-static void test_task_vma(void)
+static void test_task_vma_(struct bpf_iter_attach_opts *opts)
{
int err, iter_fd = -1, proc_maps_fd = -1;
struct bpf_iter_task_vma *skel;
@@ -1166,13 +1234,14 @@ static void test_task_vma(void)
return;
skel->bss->pid = getpid();
+ skel->bss->one_task = opts ? 1 : 0;
err = bpf_iter_task_vma__load(skel);
if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
goto out;
skel->links.proc_maps = bpf_program__attach_iter(
- skel->progs.proc_maps, NULL);
+ skel->progs.proc_maps, opts);
if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
skel->links.proc_maps = NULL;
@@ -1211,12 +1280,30 @@ static void test_task_vma(void)
str_strip_first_line(proc_maps_output);
ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
+
+ check_bpf_link_info(skel->progs.proc_maps);
+
out:
close(proc_maps_fd);
close(iter_fd);
bpf_iter_task_vma__destroy(skel);
}
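+/* Run the vma dump twice: once restricted to the current task via a tid
+ * filter, and once over every task in the system.
+ */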
+static void test_task_vma(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ linfo.task.type = BPF_TASK_ITER_TID;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ test_task_vma_(&opts);
+ test_task_vma_(NULL);
+}
+
void test_bpf_iter(void)
{
if (test__start_subtest("btf_id_or_null"))
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -764,7 +764,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
/* union with nested struct */
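+ /* Initializing .map.map_fd = 1 also fills the bytes that .task.tid
+ * overlays, so the compact dump now prints both union members.
+ */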
TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT,
- "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},}",
+ "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,},}",
{ .map = { .map_fd = 1 }});
/* struct skb with nested structs/unions; because type output is so
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
@@ -6,6 +6,10 @@
char _license[] SEC("license") = "GPL";
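+/* tid is the filter value set by the userspace test; the counters record
+ * how many iterated tasks matched it and how many did not.
+ */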
+uint32_t tid = 0;
+int num_unknown_tid = 0;
+int num_known_tid = 0;
+
SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
@@ -18,6 +22,11 @@ int dump_task(struct bpf_iter__task *ctx)
return 0;
}
+ if (task->pid != tid)
+ num_unknown_tid++;
+ else
+ num_known_tid++;
+
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(seq, " tgid gid\n");
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -7,6 +7,8 @@ char _license[] SEC("license") = "GPL";
int count = 0;
int tgid = 0;
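+/* Count distinct tgids seen by the iterator; the files of one task are
+ * visited consecutively, so tracking last_tgid transitions is enough.
+ */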
+int last_tgid = -1;
+int unique_tgid_count = 0;
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
@@ -27,6 +29,11 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
if (tgid == task->tgid && task->tgid != task->pid)
count++;
+ if (last_tgid != task->tgid) {
+ last_tgid = task->tgid;
+ unique_tgid_count++;
+ }
+
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
(long)file->f_op);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
@@ -20,6 +20,7 @@ char _license[] SEC("license") = "GPL";
#define D_PATH_BUF_SIZE 1024
char d_path_buf[D_PATH_BUF_SIZE] = {};
__u32 pid = 0;
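+/* Set by the test when the iterator is expected to visit only one task. */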
+__u32 one_task = 0;
SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
{
@@ -33,8 +34,11 @@ SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
return 0;
file = vma->vm_file;
- if (task->tgid != pid)
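+ /* When a single task was requested, seeing any other task is a bug;
+ * print a marker so the comparison with /proc/<pid>/maps fails.
+ */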
+ if (task->tgid != pid) {
+ if (one_task)
+ BPF_SEQ_PRINTF(seq, "unexpected task (%d != %d)", task->tgid, pid);
return 0;
+ }
perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';
perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';
perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';
Test iterators of vma, files, and tasks of a specific task. Ensure the API works appropriately for both going through all tasks and going through a particular task. Signed-off-by: Kui-Feng Lee <kuifeng@fb.com> --- .../selftests/bpf/prog_tests/bpf_iter.c | 131 +++++++++++++++--- .../selftests/bpf/prog_tests/btf_dump.c | 2 +- .../selftests/bpf/progs/bpf_iter_task.c | 9 ++ .../selftests/bpf/progs/bpf_iter_task_file.c | 7 + .../selftests/bpf/progs/bpf_iter_task_vma.c | 6 +- 5 files changed, 131 insertions(+), 24 deletions(-)