[bpf-next,v6,4/4] selftests/bpf: Test parameterized task BPF iterators.

Message ID 20220819220927.3409575-5-kuifeng@fb.com (mailing list archive)
State Superseded
Delegated to: BPF
Series Parameterize task iterators.

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 14 maintainers not CCed: john.fastabend@gmail.com song@kernel.org sdf@google.com martin.lau@linux.dev xukuohai@huawei.com linux-kselftest@vger.kernel.org kennyyu@fb.com 9erthalion6@gmail.com kpsingh@kernel.org jolsa@kernel.org mykolal@fb.com shuah@kernel.org haoluo@google.com brauner@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning CHECK: Alignment should match open parenthesis CHECK: Comparison to NULL could be written "!task" CHECK: Comparison to NULL could be written "!vma" CHECK: Comparison to NULL could be written "ret" CHECK: Please don't use multiple blank lines WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-4 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-1 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-6 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_verifier on s390x with gcc

Commit Message

Kui-Feng Lee Aug. 19, 2022, 10:09 p.m. UTC
Test iterators of vma, files, and tasks of tasks.

Ensure the API works appropriately to visit all tasks,
tasks in a process, or a particular task.

Signed-off-by: Kui-Feng Lee <kuifeng@fb.com>
---
 .../selftests/bpf/prog_tests/bpf_iter.c       | 284 +++++++++++++++++-
 .../selftests/bpf/prog_tests/btf_dump.c       |   2 +-
 .../selftests/bpf/progs/bpf_iter_task.c       |   9 +
 .../selftests/bpf/progs/bpf_iter_task_file.c  |   9 +-
 .../selftests/bpf/progs/bpf_iter_task_vma.c   |   6 +-
 .../bpf/progs/bpf_iter_uprobe_offset.c        |  35 +++
 6 files changed, 326 insertions(+), 19 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c

Comments

Yonghong Song Aug. 24, 2022, 8:50 p.m. UTC | #1
On 8/19/22 3:09 PM, Kui-Feng Lee wrote:
> Test iterators of vma, files, and tasks of tasks.
> 
> Ensure the API works appropriately to visit all tasks,
> tasks in a process, or a particular task.
> 
> Signed-off-by: Kui-Feng Lee <kuifeng@fb.com>
> ---
>   .../selftests/bpf/prog_tests/bpf_iter.c       | 284 +++++++++++++++++-
>   .../selftests/bpf/prog_tests/btf_dump.c       |   2 +-
>   .../selftests/bpf/progs/bpf_iter_task.c       |   9 +
>   .../selftests/bpf/progs/bpf_iter_task_file.c  |   9 +-
>   .../selftests/bpf/progs/bpf_iter_task_vma.c   |   6 +-
>   .../bpf/progs/bpf_iter_uprobe_offset.c        |  35 +++
>   6 files changed, 326 insertions(+), 19 deletions(-)
>   create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> 
[...]
> +
> +static pthread_mutex_t do_nothing_mutex;
> +
> +static void *do_nothing_wait(void *arg)
> +{
> +	pthread_mutex_lock(&do_nothing_mutex);
> +	pthread_mutex_unlock(&do_nothing_mutex);
> +
> +	pthread_exit(arg);
> +}
> +
> +static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
> +				     int *num_unknown, int *num_known)
>   {
>   	struct bpf_iter_task *skel;
> +	pthread_t thread_id;
> +	bool locked = false;

We can have a more 'kernel'-way implementation than using
'locked'.
	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
		goto done;
	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		       "pthread_create"))
		goto unlock;

	...
unlock:
	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
done:
	bpf_iter_task__destroy(skel);


> +	void *ret;
>   
>   	skel = bpf_iter_task__open_and_load();
>   	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
>   		return;
>   
> -	do_dummy_read(skel->progs.dump_task);
> +	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
> +		goto done;
> +	locked = true;
> +
> +	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
> +		  "pthread_create"))
> +		goto done;
> +
> +
> +	skel->bss->tid = getpid();
> +
> +	do_dummy_read_opts(skel->progs.dump_task, opts);
> +
> +	*num_unknown = skel->bss->num_unknown_tid;
> +	*num_known = skel->bss->num_known_tid;
> +
> +	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
> +	locked = false;
> +	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
> +		     "pthread_join");
>   
> +done:
> +	if (locked)
> +		ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
>   	bpf_iter_task__destroy(skel);
>   }
>   
> +static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
> +{
> +	int num_unknown_tid, num_known_tid;
> +
> +	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
> +	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
> +	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
> +}
> +
> +static void test_task(void)

test_task_tid?

> +{
> +	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> +	union bpf_iter_link_info linfo;
> +	int num_unknown_tid, num_known_tid;
> +
> +	memset(&linfo, 0, sizeof(linfo));
> +	linfo.task.tid = getpid();
> +	opts.link_info = &linfo;
> +	opts.link_info_len = sizeof(linfo);
> +	test_task_common(&opts, 0, 1);
> +
> +	linfo.task.tid = 0;
> +	linfo.task.pid = getpid();
> +	test_task_common(&opts, 1, 1);
> +
> +	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
> +	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
> +	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
> +}
> +
> +static void test_task_tgid(void)

test_task_pid?

> +{
> +	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> +	union bpf_iter_link_info linfo;
> +
> +	memset(&linfo, 0, sizeof(linfo));
> +	linfo.task.pid = getpid();
> +	opts.link_info = &linfo;
> +	opts.link_info_len = sizeof(linfo);
> +
> +	test_task_common(&opts, 1, 1);
> +}
> +
> +static void test_task_pidfd(void)
> +{
> +	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> +	union bpf_iter_link_info linfo;
> +	int pidfd;
> +
> +	pidfd = pidfd_open(getpid(), 0);
> +	if (!ASSERT_GT(pidfd, 0, "pidfd_open"))
> +		return;
> +
> +	memset(&linfo, 0, sizeof(linfo));
> +	linfo.task.pid_fd = pidfd;
> +	opts.link_info = &linfo;
> +	opts.link_info_len = sizeof(linfo);
> +
> +	test_task_common(&opts, 1, 1);
> +
> +	close(pidfd);
> +}
> +
>   static void test_task_sleepable(void)
>   {
>   	struct bpf_iter_task *skel;
> @@ -212,15 +349,13 @@ static void test_task_stack(void)
>   	bpf_iter_task_stack__destroy(skel);
>   }
>   
> -static void *do_nothing(void *arg)
> -{
> -	pthread_exit(arg);
> -}
> -
>   static void test_task_file(void)
>   {
> +	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
>   	struct bpf_iter_task_file *skel;
> +	union bpf_iter_link_info linfo;
>   	pthread_t thread_id;
> +	bool locked = false;

Similar to the above, the 'locked' variable can be removed
by using an alternative approach.

>   	void *ret;
>   
>   	skel = bpf_iter_task_file__open_and_load();
> @@ -229,19 +364,43 @@ static void test_task_file(void)
>   
>   	skel->bss->tgid = getpid();
>   
> -	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
> +	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
> +		goto done;
> +	locked = true;
> +
> +	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
>   		  "pthread_create"))
>   		goto done;
>   
> -	do_dummy_read(skel->progs.dump_task_file);
> +	memset(&linfo, 0, sizeof(linfo));
> +	linfo.task.tid = getpid();
> +	opts.link_info = &linfo;
> +	opts.link_info_len = sizeof(linfo);
>   
> -	if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
> -		  "pthread_join"))
> -		goto done;
> +	do_dummy_read_opts(skel->progs.dump_task_file, &opts);
> +
> +	ASSERT_EQ(skel->bss->count, 0, "check_count");
> +	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
> +
> +	skel->bss->last_tgid = 0;
> +	skel->bss->count = 0;
> +	skel->bss->unique_tgid_count = 0;
> +
> +	do_dummy_read(skel->progs.dump_task_file);
>   
>   	ASSERT_EQ(skel->bss->count, 0, "check_count");
> +	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
> +
> +	check_bpf_link_info(skel->progs.dump_task_file);
> +
> +	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
> +	locked = false;
> +	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
> +	ASSERT_NULL(ret, "pthread_join");
>   
>   done:
> +	if (locked)
> +		ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
>   	bpf_iter_task_file__destroy(skel);
>   }
>   
> @@ -1249,7 +1408,7 @@ static void str_strip_first_line(char *str)
>   	*dst = '\0';
>   }
>   
[...]
> +
>   void test_bpf_iter(void)
>   {
> +	if (!ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init"))
> +		return;
> +
>   	if (test__start_subtest("btf_id_or_null"))
>   		test_btf_id_or_null();
>   	if (test__start_subtest("ipv6_route"))
> @@ -1337,6 +1583,10 @@ void test_bpf_iter(void)
>   		test_bpf_map();
>   	if (test__start_subtest("task"))
>   		test_task();
> +	if (test__start_subtest("task_tgid"))
> +		test_task_tgid();
> +	if (test__start_subtest("task_pidfd"))
> +		test_task_pidfd();
>   	if (test__start_subtest("task_sleepable"))
>   		test_task_sleepable();
>   	if (test__start_subtest("task_stack"))
> @@ -1397,4 +1647,6 @@ void test_bpf_iter(void)
>   		test_ksym_iter();
>   	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
>   		test_bpf_sockmap_map_iter_fd();
> +	if (test__start_subtest("uprobe_offset"))
> +		test_task_uprobe_offset();

uprobe_offset -> vma_offset? See below.

>   }
> diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
> index 5fce7008d1ff..32c34ce9cbeb 100644
> --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
> +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
> @@ -764,7 +764,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
>   
>   	/* union with nested struct */
>   	TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT,
> -			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},}",
> +			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,},}",
>   			   { .map = { .map_fd = 1 }});
>   
>   	/* struct skb with nested structs/unions; because type output is so
> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
> index d22741272692..96131b9a1caa 100644
> --- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c
> +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
> @@ -6,6 +6,10 @@
>   
>   char _license[] SEC("license") = "GPL";
>   
> +uint32_t tid = 0;
> +int num_unknown_tid = 0;
> +int num_known_tid = 0;
> +
>   SEC("iter/task")
>   int dump_task(struct bpf_iter__task *ctx)
>   {
> @@ -18,6 +22,11 @@ int dump_task(struct bpf_iter__task *ctx)
>   		return 0;
>   	}
>   
> +	if (task->pid != tid)
> +		num_unknown_tid++;
> +	else
> +		num_known_tid++;
> +
>   	if (ctx->meta->seq_num == 0)
>   		BPF_SEQ_PRINTF(seq, "    tgid      gid\n");
>   
> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
> index 6e7b400888fe..b0255080662d 100644
> --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
> +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
> @@ -7,14 +7,16 @@ char _license[] SEC("license") = "GPL";
>   
>   int count = 0;
>   int tgid = 0;
> +int last_tgid = 0;
> +int unique_tgid_count = 0;
>   
>   SEC("iter/task_file")
>   int dump_task_file(struct bpf_iter__task_file *ctx)
>   {
>   	struct seq_file *seq = ctx->meta->seq;
>   	struct task_struct *task = ctx->task;
> -	__u32 fd = ctx->fd;
>   	struct file *file = ctx->file;
> +	__u32 fd = ctx->fd;
>   
>   	if (task == (void *)0 || file == (void *)0)
>   		return 0;
> @@ -27,6 +29,11 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
>   	if (tgid == task->tgid && task->tgid != task->pid)
>   		count++;
>   
> +	if (last_tgid != task->tgid) {
> +		last_tgid = task->tgid;
> +		unique_tgid_count++;
> +	}
> +
>   	BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
>   		       (long)file->f_op);
>   	return 0;
> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
> index 4ea6a37d1345..44f4a31c2ddd 100644
> --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
> +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
> @@ -20,6 +20,7 @@ char _license[] SEC("license") = "GPL";
>   #define D_PATH_BUF_SIZE 1024
>   char d_path_buf[D_PATH_BUF_SIZE] = {};
>   __u32 pid = 0;
> +__u32 one_task = 0;
>   
>   SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
>   {
> @@ -33,8 +34,11 @@ SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
>   		return 0;
>   
>   	file = vma->vm_file;
> -	if (task->tgid != pid)
> +	if (task->tgid != pid) {
> +		if (one_task)
> +			BPF_SEQ_PRINTF(seq, "unexpected task (%d != %d)", task->tgid, pid);

Let us change this to an error code like
__u32 one_task_error = 0;
...
if (task->tgid != pid) {
	if (one_task)
		one_task_error = 1;
	return 0;
}
In bpf_iter.c, we can assert one_task_error must be 0?
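
The user-space side of that check could then be as simple as (a sketch,
assuming the global is named one_task_error):

	ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected_task");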

>   		return 0;
> +	}
>   	perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';
>   	perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';
>   	perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';
> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> new file mode 100644
> index 000000000000..825ca86678bd
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c

Maybe change the file name to bpf_iter_vma_offset so we know the test
is related to 'iter/task_vma'? The offset can be used by uprobe, but
it can also be used for other purposes, e.g., symbolization.

> @@ -0,0 +1,35 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
> +#include "bpf_iter.h"
> +#include <bpf/bpf_helpers.h>
> +
> +char _license[] SEC("license") = "GPL";
> +
> +__u32 unique_tgid_cnt = 0;
> +uintptr_t address = 0;
> +uintptr_t offset = 0;
> +__u32 last_tgid = 0;
> +__u32 pid = 0;
> +
> +SEC("iter/task_vma") int get_uprobe_offset(struct bpf_iter__task_vma *ctx)

get_vma_offset?

> +{
> +	struct vm_area_struct *vma = ctx->vma;
> +	struct seq_file *seq = ctx->meta->seq;
> +	struct task_struct *task = ctx->task;
> +
> +	if (task == NULL || vma == NULL)
> +		return 0;
> +
> +	if (last_tgid != task->tgid)
> +		unique_tgid_cnt++;
> +	last_tgid = task->tgid;
> +
> +	if (task->tgid != pid)
> +		return 0;
> +
> +	if (vma->vm_start <= address && vma->vm_end > address) {
> +		offset = address - vma->vm_start + (vma->vm_pgoff << 12);
> +		BPF_SEQ_PRINTF(seq, "OK\n");
> +	}
> +	return 0;
> +}
Andrii Nakryiko Aug. 24, 2022, 10:30 p.m. UTC | #2
On Fri, Aug 19, 2022 at 3:09 PM Kui-Feng Lee <kuifeng@fb.com> wrote:
>
> Test iterators of vma, files, and tasks of tasks.
>
> Ensure the API works appropriately to visit all tasks,
> tasks in a process, or a particular task.
>
> Signed-off-by: Kui-Feng Lee <kuifeng@fb.com>
> ---
>  .../selftests/bpf/prog_tests/bpf_iter.c       | 284 +++++++++++++++++-
>  .../selftests/bpf/prog_tests/btf_dump.c       |   2 +-
>  .../selftests/bpf/progs/bpf_iter_task.c       |   9 +
>  .../selftests/bpf/progs/bpf_iter_task_file.c  |   9 +-
>  .../selftests/bpf/progs/bpf_iter_task_vma.c   |   6 +-
>  .../bpf/progs/bpf_iter_uprobe_offset.c        |  35 +++
>  6 files changed, 326 insertions(+), 19 deletions(-)
>  create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
>

[...]

> -       do_dummy_read(skel->progs.dump_task);
> +       if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
> +               goto done;
> +       locked = true;
> +
> +       if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
> +                 "pthread_create"))
> +               goto done;
> +
> +

extra empty line

> +       skel->bss->tid = getpid();
> +
> +       do_dummy_read_opts(skel->progs.dump_task, opts);
> +
> +       *num_unknown = skel->bss->num_unknown_tid;
> +       *num_known = skel->bss->num_known_tid;
> +
> +       ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
> +       locked = false;
> +       ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
> +                    "pthread_join");
>
> +done:
> +       if (locked)

it's a bit of overkill to expect and handle that
pthread_mutex_lock() might fail; I'd remove those asserts and the locked
flag and just assume that the lock works (if it doesn't, it's either a
test bug that would be caught early, or something is very broken in the
system anyway)
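
I.e., the body could be simplified to roughly this (a sketch, not the
exact code):

	/* assume locking a plain local mutex cannot realistically fail */
	pthread_mutex_lock(&do_nothing_mutex);

	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		       "pthread_create"))
		goto done;

	skel->bss->tid = getpid();
	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	pthread_mutex_unlock(&do_nothing_mutex);
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL, "pthread_join");
done:
	bpf_iter_task__destroy(skel);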

> +               ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
>         bpf_iter_task__destroy(skel);
>  }
>

[...]

>  static void test_task_sleepable(void)
>  {
>         struct bpf_iter_task *skel;
> @@ -212,15 +349,13 @@ static void test_task_stack(void)
>         bpf_iter_task_stack__destroy(skel);
>  }
>
> -static void *do_nothing(void *arg)
> -{
> -       pthread_exit(arg);
> -}
> -
>  static void test_task_file(void)
>  {
> +       DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);

DECLARE_LIBBPF_OPTS is discouraged, best to use shorter LIBBPF_OPTS
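
I.e.:

	LIBBPF_OPTS(bpf_iter_attach_opts, opts);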

>         struct bpf_iter_task_file *skel;
> +       union bpf_iter_link_info linfo;
>         pthread_t thread_id;
> +       bool locked = false;
>         void *ret;
>
>         skel = bpf_iter_task_file__open_and_load();
> @@ -229,19 +364,43 @@ static void test_task_file(void)
>
>         skel->bss->tgid = getpid();
>
> -       if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
> +       if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
> +               goto done;
> +       locked = true;

same about failing mutex_lock, it shouldn't and it's fair to expect
that it won't

> +
> +       if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
>                   "pthread_create"))
>                 goto done;
>
> -       do_dummy_read(skel->progs.dump_task_file);
> +       memset(&linfo, 0, sizeof(linfo));
> +       linfo.task.tid = getpid();
> +       opts.link_info = &linfo;
> +       opts.link_info_len = sizeof(linfo);

[...]

> +       link = bpf_program__attach_iter(skel->progs.get_uprobe_offset, opts);
> +       if (!ASSERT_OK_PTR(link, "attach_iter"))
> +               return;
> +
> +       iter_fd = bpf_iter_create(bpf_link__fd(link));
> +       if (!ASSERT_GT(iter_fd, 0, "create_iter"))
> +               goto exit;
> +
> +       while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
> +               ;
> +       CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

no CHECK()s, please; use the ASSERT_xxx() helpers instead
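
E.g., an ASSERT on the loop's exit value instead (sketch):

	ASSERT_GE(len, 0, "read");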

> +       buf[15] = 0;
> +       ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
> +
> +       ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
> +       if (one_proc)
> +               ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
> +       else
> +               ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
> +
> +       close(iter_fd);
> +
> +exit:
> +       bpf_link__destroy(link);
> +}
> +
> +static void test_task_uprobe_offset(void)
> +{
> +       LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> +       union bpf_iter_link_info linfo;
> +
> +       memset(&linfo, 0, sizeof(linfo));
> +       linfo.task.pid = getpid();
> +       opts.link_info = &linfo;
> +       opts.link_info_len = sizeof(linfo);
> +
> +       test_task_uprobe_offset_common(&opts, true);
> +
> +       linfo.task.pid = 0;
> +       linfo.task.tid = getpid();
> +       test_task_uprobe_offset_common(&opts, true);
> +
> +       test_task_uprobe_offset_common(NULL, false);
> +}
> +
>  void test_bpf_iter(void)
>  {
> +       if (!ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init"))
> +               return;
> +

ditto, too paranoid, IMO

>         if (test__start_subtest("btf_id_or_null"))
>                 test_btf_id_or_null();
>         if (test__start_subtest("ipv6_route"))

[...]

> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> new file mode 100644
> index 000000000000..825ca86678bd
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> @@ -0,0 +1,35 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
> +#include "bpf_iter.h"
> +#include <bpf/bpf_helpers.h>
> +
> +char _license[] SEC("license") = "GPL";
> +
> +__u32 unique_tgid_cnt = 0;
> +uintptr_t address = 0;
> +uintptr_t offset = 0;
> +__u32 last_tgid = 0;
> +__u32 pid = 0;
> +
> +SEC("iter/task_vma") int get_uprobe_offset(struct bpf_iter__task_vma *ctx)

please keep SEC() on separate line
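
I.e.:

SEC("iter/task_vma")
int get_uprobe_offset(struct bpf_iter__task_vma *ctx)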

> +{
> +       struct vm_area_struct *vma = ctx->vma;
> +       struct seq_file *seq = ctx->meta->seq;
> +       struct task_struct *task = ctx->task;
> +
> +       if (task == NULL || vma == NULL)
> +               return 0;
> +
> +       if (last_tgid != task->tgid)
> +               unique_tgid_cnt++;
> +       last_tgid = task->tgid;
> +
> +       if (task->tgid != pid)
> +               return 0;
> +
> +       if (vma->vm_start <= address && vma->vm_end > address) {
> +               offset = address - vma->vm_start + (vma->vm_pgoff << 12);

it's best not to assume page_size is 4K; you can pass the actual value
through a global variable from user space (we've previously fixed a
bunch of tests with a fixed page_size assumption as they break some
platforms, let's not regress that)
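
A sketch of what that could look like (the variable names here are just
illustrative):

	/* BPF side: page_shift is filled in from user space before attaching */
	__u32 page_shift = 0;
	...
		offset = address - vma->vm_start + ((__u64)vma->vm_pgoff << page_shift);

	/* user-space side, somewhere in the test setup */
	skel->bss->page_shift = __builtin_ctz(getpagesize());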

> +               BPF_SEQ_PRINTF(seq, "OK\n");
> +       }
> +       return 0;
> +}
> --
> 2.30.2
>
Kui-Feng Lee Aug. 25, 2022, 1:07 a.m. UTC | #3
On Wed, 2022-08-24 at 15:30 -0700, Andrii Nakryiko wrote:
> On Fri, Aug 19, 2022 at 3:09 PM Kui-Feng Lee <kuifeng@fb.com> wrote:
> > 
> > Test iterators of vma, files, and tasks of tasks.
> > 
> > Ensure the API works appropriately to visit all tasks,
> > tasks in a process, or a particular task.
> > 
> > Signed-off-by: Kui-Feng Lee <kuifeng@fb.com>
> > ---
> >  .../selftests/bpf/prog_tests/bpf_iter.c       | 284
> > +++++++++++++++++-
> >  .../selftests/bpf/prog_tests/btf_dump.c       |   2 +-
> >  .../selftests/bpf/progs/bpf_iter_task.c       |   9 +
> >  .../selftests/bpf/progs/bpf_iter_task_file.c  |   9 +-
> >  .../selftests/bpf/progs/bpf_iter_task_vma.c   |   6 +-
> >  .../bpf/progs/bpf_iter_uprobe_offset.c        |  35 +++
> >  6 files changed, 326 insertions(+), 19 deletions(-)
> >  create mode 100644
> > tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> 
[...]
> > +
> >  void test_bpf_iter(void)
> >  {
> > +       if (!ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL),
> > "pthread_mutex_init"))
> > +               return;
> > +
> 
> ditto, too paranoid, IMO

Right, for test cases, we can relax the checks.
I would like to have something like

    INFALLIBLE(pthread_mutex_init(......)...);

which would crash at the first failure.
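
A minimal sketch of such a helper (hypothetical, not an existing
test_progs macro):

	#define INFALLIBLE(expr)					\
		do {							\
			if (expr) {					\
				fprintf(stderr, "%s:%d: %s failed\n",	\
					__FILE__, __LINE__, #expr);	\
				abort();				\
			}						\
		} while (0)

	INFALLIBLE(pthread_mutex_init(&do_nothing_mutex, NULL));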

> 
> >         if (test__start_subtest("btf_id_or_null"))
> >                 test_btf_id_or_null();
> >         if (test__start_subtest("ipv6_route"))
> 
> [...]
> 
> > diff --git
> > a/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> > b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> > new file mode 100644
> > index 000000000000..825ca86678bd
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> > @@ -0,0 +1,35 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
> > +#include "bpf_iter.h"
> > +#include <bpf/bpf_helpers.h>
> > +
> > +char _license[] SEC("license") = "GPL";
> > +
> > +__u32 unique_tgid_cnt = 0;
> > +uintptr_t address = 0;
> > +uintptr_t offset = 0;
> > +__u32 last_tgid = 0;
> > +__u32 pid = 0;
> > +
> > +SEC("iter/task_vma") int get_uprobe_offset(struct
> > bpf_iter__task_vma *ctx)
> 
> please keep SEC() on separate line
> 
> > +{
> > +       struct vm_area_struct *vma = ctx->vma;
> > +       struct seq_file *seq = ctx->meta->seq;
> > +       struct task_struct *task = ctx->task;
> > +
> > +       if (task == NULL || vma == NULL)
> > +               return 0;
> > +
> > +       if (last_tgid != task->tgid)
> > +               unique_tgid_cnt++;
> > +       last_tgid = task->tgid;
> > +
> > +       if (task->tgid != pid)
> > +               return 0;
> > +
> > +       if (vma->vm_start <= address && vma->vm_end > address) {
> > +               offset = address - vma->vm_start + (vma->vm_pgoff
> > << 12);
> 
> it's best not to assume page_size is 4K, you can pass actual value
> through global variable from user-space (we've previously fixed a
> bunch of tests with fixed page_size assumption as they break some
> platforms, let's not regress that)

A getpagesize() helper would help :)

> 
> > +               BPF_SEQ_PRINTF(seq, "OK\n");
> > +       }
> > +       return 0;
> > +}
> > --
> > 2.30.2
> >

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index e89685bd587c..c1ef7ffa6a43 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1,6 +1,7 @@ 
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
 #include <test_progs.h>
+#include <sys/syscall.h>
 #include "bpf_iter_ipv6_route.skel.h"
 #include "bpf_iter_netlink.skel.h"
 #include "bpf_iter_bpf_map.skel.h"
@@ -14,6 +15,7 @@ 
 #include "bpf_iter_udp4.skel.h"
 #include "bpf_iter_udp6.skel.h"
 #include "bpf_iter_unix.skel.h"
+#include "bpf_iter_uprobe_offset.skel.h"
 #include "bpf_iter_test_kern1.skel.h"
 #include "bpf_iter_test_kern2.skel.h"
 #include "bpf_iter_test_kern3.skel.h"
@@ -43,13 +45,13 @@  static void test_btf_id_or_null(void)
 	}
 }
 
-static void do_dummy_read(struct bpf_program *prog)
+static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
 {
 	struct bpf_link *link;
 	char buf[16] = {};
 	int iter_fd, len;
 
-	link = bpf_program__attach_iter(prog, NULL);
+	link = bpf_program__attach_iter(prog, opts);
 	if (!ASSERT_OK_PTR(link, "attach_iter"))
 		return;
 
@@ -68,6 +70,11 @@  static void do_dummy_read(struct bpf_program *prog)
 	bpf_link__destroy(link);
 }
 
+static void do_dummy_read(struct bpf_program *prog)
+{
+	do_dummy_read_opts(prog, NULL);
+}
+
 static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
 				struct bpf_map *map)
 {
@@ -167,19 +174,149 @@  static void test_bpf_map(void)
 	bpf_iter_bpf_map__destroy(skel);
 }
 
-static void test_task(void)
+static int pidfd_open(pid_t pid, unsigned int flags)
+{
+	return syscall(SYS_pidfd_open, pid, flags);
+}
+
+static void check_bpf_link_info(const struct bpf_program *prog)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	struct bpf_link_info info = {};
+	struct bpf_link *link;
+	__u32 info_len;
+	int err;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(prog, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		return;
+
+	info_len = sizeof(info);
+	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
+	ASSERT_OK(err, "bpf_obj_get_info_by_fd");
+	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
+
+	bpf_link__destroy(link);
+}
+
+static pthread_mutex_t do_nothing_mutex;
+
+static void *do_nothing_wait(void *arg)
+{
+	pthread_mutex_lock(&do_nothing_mutex);
+	pthread_mutex_unlock(&do_nothing_mutex);
+
+	pthread_exit(arg);
+}
+
+static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
+				     int *num_unknown, int *num_known)
 {
 	struct bpf_iter_task *skel;
+	pthread_t thread_id;
+	bool locked = false;
+	void *ret;
 
 	skel = bpf_iter_task__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
 		return;
 
-	do_dummy_read(skel->progs.dump_task);
+	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
+		goto done;
+	locked = true;
+
+	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+		  "pthread_create"))
+		goto done;
+
+
+	skel->bss->tid = getpid();
+
+	do_dummy_read_opts(skel->progs.dump_task, opts);
+
+	*num_unknown = skel->bss->num_unknown_tid;
+	*num_known = skel->bss->num_known_tid;
+
+	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
+	locked = false;
+	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
+		     "pthread_join");
 
+done:
+	if (locked)
+		ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
 	bpf_iter_task__destroy(skel);
 }
 
+static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
+{
+	int num_unknown_tid, num_known_tid;
+
+	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
+	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
+	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
+}
+
+static void test_task(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int num_unknown_tid, num_known_tid;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	test_task_common(&opts, 0, 1);
+
+	linfo.task.tid = 0;
+	linfo.task.pid = getpid();
+	test_task_common(&opts, 1, 1);
+
+	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
+	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
+	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
+}
+
+static void test_task_tgid(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.pid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_common(&opts, 1, 1);
+}
+
+static void test_task_pidfd(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int pidfd;
+
+	pidfd = pidfd_open(getpid(), 0);
+	if (!ASSERT_GT(pidfd, 0, "pidfd_open"))
+		return;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.pid_fd = pidfd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_common(&opts, 1, 1);
+
+	close(pidfd);
+}
+
 static void test_task_sleepable(void)
 {
 	struct bpf_iter_task *skel;
@@ -212,15 +349,13 @@  static void test_task_stack(void)
 	bpf_iter_task_stack__destroy(skel);
 }
 
-static void *do_nothing(void *arg)
-{
-	pthread_exit(arg);
-}
-
 static void test_task_file(void)
 {
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct bpf_iter_task_file *skel;
+	union bpf_iter_link_info linfo;
 	pthread_t thread_id;
+	bool locked = false;
 	void *ret;
 
 	skel = bpf_iter_task_file__open_and_load();
@@ -229,19 +364,43 @@  static void test_task_file(void)
 
 	skel->bss->tgid = getpid();
 
-	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
+	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
+		goto done;
+	locked = true;
+
+	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
 		  "pthread_create"))
 		goto done;
 
-	do_dummy_read(skel->progs.dump_task_file);
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 
-	if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
-		  "pthread_join"))
-		goto done;
+	do_dummy_read_opts(skel->progs.dump_task_file, &opts);
+
+	ASSERT_EQ(skel->bss->count, 0, "check_count");
+	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+	skel->bss->last_tgid = 0;
+	skel->bss->count = 0;
+	skel->bss->unique_tgid_count = 0;
+
+	do_dummy_read(skel->progs.dump_task_file);
 
 	ASSERT_EQ(skel->bss->count, 0, "check_count");
+	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+	check_bpf_link_info(skel->progs.dump_task_file);
+
+	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
+	locked = false;
+	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
+	ASSERT_NULL(ret, "pthread_join");
 
 done:
+	if (locked)
+		ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
 	bpf_iter_task_file__destroy(skel);
 }
 
@@ -1249,7 +1408,7 @@  static void str_strip_first_line(char *str)
 	*dst = '\0';
 }
 
-static void test_task_vma(void)
+static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
 {
 	int err, iter_fd = -1, proc_maps_fd = -1;
 	struct bpf_iter_task_vma *skel;
@@ -1261,13 +1420,14 @@  static void test_task_vma(void)
 		return;
 
 	skel->bss->pid = getpid();
+	skel->bss->one_task = opts ? 1 : 0;
 
 	err = bpf_iter_task_vma__load(skel);
 	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
 		goto out;
 
 	skel->links.proc_maps = bpf_program__attach_iter(
-		skel->progs.proc_maps, NULL);
+		skel->progs.proc_maps, opts);
 
 	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
 		skel->links.proc_maps = NULL;
@@ -1306,6 +1466,9 @@  static void test_task_vma(void)
 	str_strip_first_line(proc_maps_output);
 
 	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
+
+	check_bpf_link_info(skel->progs.proc_maps);
+
 out:
 	close(proc_maps_fd);
 	close(iter_fd);
@@ -1325,8 +1488,91 @@  void test_bpf_sockmap_map_iter_fd(void)
 	bpf_iter_sockmap__destroy(skel);
 }
 
+static void test_task_vma(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_vma_common(&opts);
+	test_task_vma_common(NULL);
+}
+
+/* uprobe attach point */
+static noinline int trigger_func(int arg)
+{
+	asm volatile ("");
+	return arg + 1;
+}
+
+static void test_task_uprobe_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
+{
+	struct bpf_iter_uprobe_offset *skel;
+	struct bpf_link *link;
+	char buf[16] = {};
+	int iter_fd, len;
+
+	skel = bpf_iter_uprobe_offset__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_uprobe_offset__open_and_load"))
+		return;
+
+	skel->bss->pid = getpid();
+	skel->bss->address = (uintptr_t)trigger_func;
+
+	link = bpf_program__attach_iter(skel->progs.get_uprobe_offset, opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		return;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
+		goto exit;
+
+	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+		;
+	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
+	buf[15] = 0;
+	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
+
+	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
+	if (one_proc)
+		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
+	else
+		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
+
+	close(iter_fd);
+
+exit:
+	bpf_link__destroy(link);
+}
+
+static void test_task_uprobe_offset(void)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.pid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_uprobe_offset_common(&opts, true);
+
+	linfo.task.pid = 0;
+	linfo.task.tid = getpid();
+	test_task_uprobe_offset_common(&opts, true);
+
+	test_task_uprobe_offset_common(NULL, false);
+}
+
 void test_bpf_iter(void)
 {
+	if (!ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init"))
+		return;
+
 	if (test__start_subtest("btf_id_or_null"))
 		test_btf_id_or_null();
 	if (test__start_subtest("ipv6_route"))
@@ -1337,6 +1583,10 @@  void test_bpf_iter(void)
 		test_bpf_map();
 	if (test__start_subtest("task"))
 		test_task();
+	if (test__start_subtest("task_tgid"))
+		test_task_tgid();
+	if (test__start_subtest("task_pidfd"))
+		test_task_pidfd();
 	if (test__start_subtest("task_sleepable"))
 		test_task_sleepable();
 	if (test__start_subtest("task_stack"))
@@ -1397,4 +1647,6 @@  void test_bpf_iter(void)
 		test_ksym_iter();
 	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
 		test_bpf_sockmap_map_iter_fd();
+	if (test__start_subtest("uprobe_offset"))
+		test_task_uprobe_offset();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 5fce7008d1ff..32c34ce9cbeb 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -764,7 +764,7 @@  static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
 
 	/* union with nested struct */
 	TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT,
-			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},}",
+			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,},}",
 			   { .map = { .map_fd = 1 }});
 
 	/* struct skb with nested structs/unions; because type output is so
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
index d22741272692..96131b9a1caa 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
@@ -6,6 +6,10 @@ 
 
 char _license[] SEC("license") = "GPL";
 
+uint32_t tid = 0;
+int num_unknown_tid = 0;
+int num_known_tid = 0;
+
 SEC("iter/task")
 int dump_task(struct bpf_iter__task *ctx)
 {
@@ -18,6 +22,11 @@  int dump_task(struct bpf_iter__task *ctx)
 		return 0;
 	}
 
+	if (task->pid != tid)
+		num_unknown_tid++;
+	else
+		num_known_tid++;
+
 	if (ctx->meta->seq_num == 0)
 		BPF_SEQ_PRINTF(seq, "    tgid      gid\n");
 
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index 6e7b400888fe..b0255080662d 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -7,14 +7,16 @@  char _license[] SEC("license") = "GPL";
 
 int count = 0;
 int tgid = 0;
+int last_tgid = 0;
+int unique_tgid_count = 0;
 
 SEC("iter/task_file")
 int dump_task_file(struct bpf_iter__task_file *ctx)
 {
 	struct seq_file *seq = ctx->meta->seq;
 	struct task_struct *task = ctx->task;
-	__u32 fd = ctx->fd;
 	struct file *file = ctx->file;
+	__u32 fd = ctx->fd;
 
 	if (task == (void *)0 || file == (void *)0)
 		return 0;
@@ -27,6 +29,11 @@  int dump_task_file(struct bpf_iter__task_file *ctx)
 	if (tgid == task->tgid && task->tgid != task->pid)
 		count++;
 
+	if (last_tgid != task->tgid) {
+		last_tgid = task->tgid;
+		unique_tgid_count++;
+	}
+
 	BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
 		       (long)file->f_op);
 	return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
index 4ea6a37d1345..44f4a31c2ddd 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
@@ -20,6 +20,7 @@  char _license[] SEC("license") = "GPL";
 #define D_PATH_BUF_SIZE 1024
 char d_path_buf[D_PATH_BUF_SIZE] = {};
 __u32 pid = 0;
+__u32 one_task = 0;
 
 SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
 {
@@ -33,8 +34,11 @@  SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
 		return 0;
 
 	file = vma->vm_file;
-	if (task->tgid != pid)
+	if (task->tgid != pid) {
+		if (one_task)
+			BPF_SEQ_PRINTF(seq, "unexpected task (%d != %d)", task->tgid, pid);
 		return 0;
+	}
 	perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';
 	perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';
 	perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
new file mode 100644
index 000000000000..825ca86678bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
@@ -0,0 +1,35 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 unique_tgid_cnt = 0;
+uintptr_t address = 0;
+uintptr_t offset = 0;
+__u32 last_tgid = 0;
+__u32 pid = 0;
+
+SEC("iter/task_vma") int get_uprobe_offset(struct bpf_iter__task_vma *ctx)
+{
+	struct vm_area_struct *vma = ctx->vma;
+	struct seq_file *seq = ctx->meta->seq;
+	struct task_struct *task = ctx->task;
+
+	if (task == NULL || vma == NULL)
+		return 0;
+
+	if (last_tgid != task->tgid)
+		unique_tgid_cnt++;
+	last_tgid = task->tgid;
+
+	if (task->tgid != pid)
+		return 0;
+
+	if (vma->vm_start <= address && vma->vm_end > address) {
+		offset = address - vma->vm_start + (vma->vm_pgoff << 12);
+		BPF_SEQ_PRINTF(seq, "OK\n");
+	}
+	return 0;
+}