[bpf-next,7/8] selftests/bpf: add hashmap test for bpf_for_each_map_elem() helper

Message ID 20210204234835.1629656-1-yhs@fb.com (mailing list archive)
State Superseded
Delegated to: BPF
Series: bpf: add bpf_for_each_map_elem() helper

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 8 maintainers not CCed: shuah@kernel.org netdev@vger.kernel.org songliubraving@fb.com linux-kselftest@vger.kernel.org andrii@kernel.org kpsingh@kernel.org john.fastabend@gmail.com kafai@fb.com
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 2 this patch: 0
netdev/verify_fixes success
netdev/checkpatch fail ERROR: do not initialise globals to 0 WARNING: added, moved or deleted file(s), does MAINTAINERS need updating?
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success
netdev/stable success Stable not CCed

Commit Message

Yonghong Song Feb. 4, 2021, 11:48 p.m. UTC
A test case is added for hashmap and percpu hashmap. The test
also exercises nested bpf_for_each_map_elem() calls like
    bpf_prog:
      bpf_for_each_map_elem(func1)
    func1:
      bpf_for_each_map_elem(func2)
    func2:

  $ ./test_progs -n 44
  #44/1 hash_map:OK
  #44 for_each:OK
  Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 .../selftests/bpf/prog_tests/for_each.c       |  91 ++++++++++++++++
 .../bpf/progs/for_each_hash_map_elem.c        | 103 ++++++++++++++++++
 2 files changed, 194 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/for_each.c
 create mode 100644 tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
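
For reference, the helper exercised by this test, added earlier in this
series, has the shape below. Per the series description, the callback
returns 0 to continue iterating or 1 to stop early, flags is currently
expected to be 0, and the helper returns the number of traversed
elements; the callback prototype matches this test's check_hash_elem():

  long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
			     void *callback_ctx, u64 flags);

  static __u64 callback_fn(struct bpf_map *map, __u32 *key, __u64 *val,
			   struct callback_ctx *callback_ctx);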

Comments

Andrii Nakryiko Feb. 8, 2021, 6:34 p.m. UTC | #1
On Thu, Feb 4, 2021 at 5:53 PM Yonghong Song <yhs@fb.com> wrote:
>
> A test case is added for hashmap and percpu hashmap. The test
> also exercises nested bpf_for_each_map_elem() calls like
>     bpf_prog:
>       bpf_for_each_map_elem(func1)
>     func1:
>       bpf_for_each_map_elem(func2)
>     func2:
>
>   $ ./test_progs -n 44
>   #44/1 hash_map:OK
>   #44 for_each:OK
>   Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED
>
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---
>  .../selftests/bpf/prog_tests/for_each.c       |  91 ++++++++++++++++
>  .../bpf/progs/for_each_hash_map_elem.c        | 103 ++++++++++++++++++
>  2 files changed, 194 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/for_each.c
>  create mode 100644 tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
>

[...]

> +       num_cpus = bpf_num_possible_cpus();
> +       percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
> +       percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
> +       if (CHECK_FAIL(!percpu_valbuf))
> +               goto out;
> +
> +       key = 1;
> +       for (i = 0; i < num_cpus; i++)
> +               percpu_valbuf[i] = i + 1;
> +       err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
> +       if (CHECK(err, "percpu_map_update", "map_update failed\n"))
> +               goto out;
> +
> +       do_dummy_read(skel->progs.dump_task);

why use iter/task programs to trigger your test BPF code? This test
doesn't seem to rely on anything iter-specific, so it's much simpler
(and less code) to just use the typical sys_enter approach with
usleep(1) as a trigger function, no?
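
A minimal sketch of that trigger pattern, reusing this patch's
check_hash_elem() callback and globals (section and function names here
are illustrative, and it assumes usleep() enters nanosleep on the test
system):

  /* BPF side: replaces the iter/task program */
  SEC("tp/syscalls/sys_enter_nanosleep")
  int test_for_each_trigger(void *ctx)
  {
  	struct callback_ctx data;

  	/* only run once since the callback deletes map elements */
  	if (called > 0)
  		return 0;
  	called++;

  	data.ctx = 0;
  	data.input = 100;
  	data.output = 0;
  	bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
  	hashmap_output = data.output;
  	return 0;
  }

  /* userspace side: after skeleton attach, trigger and check */
  usleep(1);
  ASSERT_EQ(skel->bss->called, 1, "called");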

> +
> +       ASSERT_EQ(skel->bss->called, 1, "called");
> +       ASSERT_EQ(skel->bss->hashmap_output, 4, "output_val");
> +
> +       key = 1;
> +       err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
> +       ASSERT_ERR(err, "hashmap_lookup");
> +
> +       ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
> +       CHECK_FAIL(skel->bss->cpu >= num_cpus);

please don't use CHECK_FAIL: use CHECK or one of ASSERT_xxx variants

> +       ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
> +       ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
> +       ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
> +out:
> +       free(percpu_valbuf);
> +       for_each_hash_map_elem__destroy(skel);
> +}
> +
> +void test_for_each(void)
> +{
> +       if (test__start_subtest("hash_map"))
> +               test_hash_map();
> +}

[...]

> +
> +__u32 cpu = 0;
> +__u32 percpu_called = 0;
> +__u32 percpu_key = 0;
> +__u64 percpu_val = 0;
> +
> +static __u64
> +check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
> +                 struct callback_ctx *data)
> +{
> +       percpu_called++;
> +       cpu = bpf_get_smp_processor_id();

It's a bit counter-intuitive (at least I was confused initially) that
for a per-cpu array for_each() will iterate only current CPU's
elements. I think it's worthwhile to emphasize this in
bpf_for_each_map_elem() documentation (at least), and call out in
corresponding patches adding per-cpu array/hash iteration support.
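
Concretely, with this test's data: userspace seeds percpu_valbuf[i] =
i + 1 for every possible CPU i, yet the callback runs once and observes
only the slot of the CPU the program happens to execute on, i.e.

  cpu = bpf_get_smp_processor_id();
  percpu_val = *val;	/* one CPU's slot only: cpu + 1, not all values */

which is exactly what ASSERT_EQ(skel->bss->percpu_val,
skel->bss->cpu + 1, "percpu_val") checks on the userspace side.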

> +       percpu_key = *key;
> +       percpu_val = *val;
> +
> +       bpf_for_each_map_elem(&hashmap, check_hash_elem, data, 0);
> +       return 0;
> +}
> +

[...]
Yonghong Song Feb. 9, 2021, 6:46 a.m. UTC | #2
On 2/8/21 10:34 AM, Andrii Nakryiko wrote:
> On Thu, Feb 4, 2021 at 5:53 PM Yonghong Song <yhs@fb.com> wrote:
>>
>> A test case is added for hashmap and percpu hashmap. The test
>> also exercises nested bpf_for_each_map_elem() calls like
>>      bpf_prog:
>>        bpf_for_each_map_elem(func1)
>>      func1:
>>        bpf_for_each_map_elem(func2)
>>      func2:
>>
>>    $ ./test_progs -n 44
>>    #44/1 hash_map:OK
>>    #44 for_each:OK
>>    Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED
>>
>> Signed-off-by: Yonghong Song <yhs@fb.com>
>> ---
>>   .../selftests/bpf/prog_tests/for_each.c       |  91 ++++++++++++++++
>>   .../bpf/progs/for_each_hash_map_elem.c        | 103 ++++++++++++++++++
>>   2 files changed, 194 insertions(+)
>>   create mode 100644 tools/testing/selftests/bpf/prog_tests/for_each.c
>>   create mode 100644 tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
>>
> 
> [...]
> 
>> +       num_cpus = bpf_num_possible_cpus();
>> +       percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
>> +       percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
>> +       if (CHECK_FAIL(!percpu_valbuf))
>> +               goto out;
>> +
>> +       key = 1;
>> +       for (i = 0; i < num_cpus; i++)
>> +               percpu_valbuf[i] = i + 1;
>> +       err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
>> +       if (CHECK(err, "percpu_map_update", "map_update failed\n"))
>> +               goto out;
>> +
>> +       do_dummy_read(skel->progs.dump_task);
> 
> why use iter/task programs to trigger your test BPF code? This test
> doesn't seem to rely on anything iter-specific, so it's much simpler
> (and less code) to just use the typical sys_enter approach with
> usleep(1) as a trigger function, no?

I am aware of this. I kept this in v1 mainly to get comments on the
API and verifier changes first. I will use bpf_prog_test_run() to call
the program in v2.
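
A minimal sketch of that direction, with the assumptions that the
program is moved to a BPF_PROG_TEST_RUN-able type such as
SEC("classifier"), renamed test_pkt_access here purely for
illustration, and that pkt_v4 comes from the selftests'
network_helpers.h:

  __u32 duration = 0, retval;
  int err;

  err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access),
  			  1, &pkt_v4, sizeof(pkt_v4),
  			  NULL, NULL, &retval, &duration);
  if (CHECK(err || retval, "prog_test_run", "err %d retval %d\n",
  	    err, retval))
  	goto out;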

> 
>> +
>> +       ASSERT_EQ(skel->bss->called, 1, "called");
>> +       ASSERT_EQ(skel->bss->hashmap_output, 4, "output_val");
>> +
>> +       key = 1;
>> +       err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
>> +       ASSERT_ERR(err, "hashmap_lookup");
>> +
>> +       ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
>> +       CHECK_FAIL(skel->bss->cpu >= num_cpus);
> 
> please don't use CHECK_FAIL: use CHECK or one of ASSERT_xxx variants

We do not have ASSERT_GE; I may invent one.
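
One possible shape, modeled on the existing ASSERT_EQ() in test_progs.h
(a sketch; the final name and message format are open):

  #define ASSERT_GE(actual, expected, name) ({				\
  	static int duration = 0;					\
  	typeof(actual) ___act = (actual);				\
  	typeof(expected) ___exp = (expected);				\
  	bool ___ok = ___act >= ___exp;					\
  	CHECK(!___ok, (name),						\
  	      "unexpected %s: actual %lld < expected %lld\n",		\
  	      (name), (long long)(___act), (long long)(___exp));	\
  	___ok;								\
  })

The CHECK_FAIL() above could then become, e.g.,
ASSERT_GE(num_cpus, skel->bss->cpu + 1, "num_cpus").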

> 
>> +       ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
>> +       ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
>> +       ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
>> +out:
>> +       free(percpu_valbuf);
>> +       for_each_hash_map_elem__destroy(skel);
>> +}
>> +
>> +void test_for_each(void)
>> +{
>> +       if (test__start_subtest("hash_map"))
>> +               test_hash_map();
>> +}
> 
> [...]
> 
>> +
>> +__u32 cpu = 0;
>> +__u32 percpu_called = 0;
>> +__u32 percpu_key = 0;
>> +__u64 percpu_val = 0;
>> +
>> +static __u64
>> +check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
>> +                 struct callback_ctx *data)
>> +{
>> +       percpu_called++;
>> +       cpu = bpf_get_smp_processor_id();
> 
> It's a bit counter-intuitive (at least I was confused initially) that
> for a per-cpu array for_each() will iterate only current CPU's
> elements. I think it's worthwhile to emphasize this in
> bpf_for_each_map_elem() documentation (at least), and call out in
> corresponding patches adding per-cpu array/hash iteration support.

Right. Will emphasize this in uapi bpf.h and also comments in the code.

> 
>> +       percpu_key = *key;
>> +       percpu_val = *val;
>> +
>> +       bpf_for_each_map_elem(&hashmap, check_hash_elem, data, 0);
>> +       return 0;
>> +}
>> +
> 
> [...]
>
Andrii Nakryiko Feb. 9, 2021, 5:36 p.m. UTC | #3
On Mon, Feb 8, 2021 at 10:46 PM Yonghong Song <yhs@fb.com> wrote:
>
>
>
> On 2/8/21 10:34 AM, Andrii Nakryiko wrote:
> > On Thu, Feb 4, 2021 at 5:53 PM Yonghong Song <yhs@fb.com> wrote:
> >>
> >> A test case is added for hashmap and percpu hashmap. The test
> >> also exercises nested bpf_for_each_map_elem() calls like
> >>      bpf_prog:
> >>        bpf_for_each_map_elem(func1)
> >>      func1:
> >>        bpf_for_each_map_elem(func2)
> >>      func2:
> >>
> >>    $ ./test_progs -n 44
> >>    #44/1 hash_map:OK
> >>    #44 for_each:OK
> >>    Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED
> >>
> >> Signed-off-by: Yonghong Song <yhs@fb.com>
> >> ---
> >>   .../selftests/bpf/prog_tests/for_each.c       |  91 ++++++++++++++++
> >>   .../bpf/progs/for_each_hash_map_elem.c        | 103 ++++++++++++++++++
> >>   2 files changed, 194 insertions(+)
> >>   create mode 100644 tools/testing/selftests/bpf/prog_tests/for_each.c
> >>   create mode 100644 tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
> >>
> >
> > [...]
> >
> >> +       num_cpus = bpf_num_possible_cpus();
> >> +       percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
> >> +       percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
> >> +       if (CHECK_FAIL(!percpu_valbuf))
> >> +               goto out;
> >> +
> >> +       key = 1;
> >> +       for (i = 0; i < num_cpus; i++)
> >> +               percpu_valbuf[i] = i + 1;
> >> +       err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
> >> +       if (CHECK(err, "percpu_map_update", "map_update failed\n"))
> >> +               goto out;
> >> +
> >> +       do_dummy_read(skel->progs.dump_task);
> >
> > why use iter/task programs to trigger your test BPF code? This test
> > doesn't seem to rely on anything iter-specific, so it's much simpler
> > (and less code) to just use the typical sys_enter approach with
> > usleep(1) as a trigger function, no?
>
> I am aware of this. I kept this in v1 mainly to get comments on the
> API and verifier changes first. I will use bpf_prog_test_run() to call
> the program in v2.
>
> >
> >> +
> >> +       ASSERT_EQ(skel->bss->called, 1, "called");
> >> +       ASSERT_EQ(skel->bss->hashmap_output, 4, "output_val");
> >> +
> >> +       key = 1;
> >> +       err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
> >> +       ASSERT_ERR(err, "hashmap_lookup");
> >> +
> >> +       ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
> >> +       CHECK_FAIL(skel->bss->cpu >= num_cpus);
> >
> > please don't use CHECK_FAIL: use CHECK or one of ASSERT_xxx variants
>
> We do not have ASSERT_GE; I may invent one.

Yeah, it has come up multiple times now. It would be great to have all
the inequality variants so that we can stop adding new CHECK()s which
have notoriously confusing semantics. Thanks!

>
> >
> >> +       ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
> >> +       ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
> >> +       ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
> >> +out:
> >> +       free(percpu_valbuf);
> >> +       for_each_hash_map_elem__destroy(skel);
> >> +}
> >> +
> >> +void test_for_each(void)
> >> +{
> >> +       if (test__start_subtest("hash_map"))
> >> +               test_hash_map();
> >> +}
> >
> > [...]
> >
> >> +
> >> +__u32 cpu = 0;
> >> +__u32 percpu_called = 0;
> >> +__u32 percpu_key = 0;
> >> +__u64 percpu_val = 0;
> >> +
> >> +static __u64
> >> +check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
> >> +                 struct callback_ctx *data)
> >> +{
> >> +       percpu_called++;
> >> +       cpu = bpf_get_smp_processor_id();
> >
> > It's a bit counter-intuitive (at least I was confused initially) that
> > for a per-cpu array for_each() will iterate only current CPU's
> > elements. I think it's worthwhile to emphasize this in
> > bpf_for_each_map_elem() documentation (at least), and call out in
> > corresponding patches adding per-cpu array/hash iteration support.
>
> Right. Will emphasize this in uapi bpf.h and also comments in the code.
>
> >
> >> +       percpu_key = *key;
> >> +       percpu_val = *val;
> >> +
> >> +       bpf_for_each_map_elem(&hashmap, check_hash_elem, data, 0);
> >> +       return 0;
> >> +}
> >> +
> >
> > [...]
> >

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c
new file mode 100644
index 000000000000..7a399fbc89a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/for_each.c
@@ -0,0 +1,91 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "for_each_hash_map_elem.skel.h"
+
+static int duration;
+
+static void do_dummy_read(struct bpf_program *prog)
+{
+	struct bpf_link *link;
+	char buf[16] = {};
+	int iter_fd, len;
+
+	link = bpf_program__attach_iter(prog, NULL);
+	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+		return;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+		goto free_link;
+
+	/* don't check contents, just ensure read() ends without error */
+	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+		;
+	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
+
+	close(iter_fd);
+
+free_link:
+	bpf_link__destroy(link);
+}
+
+static void test_hash_map(void)
+{
+	int i, hashmap_fd, percpu_map_fd, err;
+	struct for_each_hash_map_elem *skel;
+	__u64 *percpu_valbuf = NULL;
+	__u32 key, num_cpus;
+	__u64 val;
+
+	skel = for_each_hash_map_elem__open_and_load();
+	if (CHECK(!skel, "for_each_hash_map_elem__open_and_load",
+		  "skeleton open_and_load failed\n"))
+		return;
+
+	hashmap_fd = bpf_map__fd(skel->maps.hashmap);
+	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap); i++) {
+		key = i;
+		val = i + 1;
+		err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
+		if (CHECK(err, "map_update", "map_update failed\n"))
+			goto out;
+	}
+
+	num_cpus = bpf_num_possible_cpus();
+	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
+	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+	if (CHECK_FAIL(!percpu_valbuf))
+		goto out;
+
+	key = 1;
+	for (i = 0; i < num_cpus; i++)
+		percpu_valbuf[i] = i + 1;
+	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+	if (CHECK(err, "percpu_map_update", "map_update failed\n"))
+		goto out;
+
+	do_dummy_read(skel->progs.dump_task);
+
+	ASSERT_EQ(skel->bss->called, 1, "called");
+	ASSERT_EQ(skel->bss->hashmap_output, 4, "output_val");
+
+	key = 1;
+	err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
+	ASSERT_ERR(err, "hashmap_lookup");
+
+	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
+	CHECK_FAIL(skel->bss->cpu >= num_cpus);
+	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
+	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
+	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
+out:
+	free(percpu_valbuf);
+	for_each_hash_map_elem__destroy(skel);
+}
+
+void test_for_each(void)
+{
+	if (test__start_subtest("hash_map"))
+		test_hash_map();
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
new file mode 100644
index 000000000000..7808a5aa75e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
@@ -0,0 +1,103 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 3);
+	__type(key, __u32);
+	__type(value, __u64);
+} hashmap SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+	struct bpf_iter__task *ctx;
+	int input;
+	int output;
+};
+
+static __u64
+check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+		struct callback_ctx *data)
+{
+	struct bpf_iter__task *ctx = data->ctx;
+	__u32 k;
+	__u64 v;
+
+	if (ctx) {
+		k = *key;
+		v = *val;
+		if (ctx->meta->seq_num == 10 && k == 10 && v == 10)
+			data->output = 3; /* impossible path */
+		else
+			data->output = 4;
+	} else {
+		data->output = data->input;
+		bpf_map_delete_elem(map, key);
+	}
+
+	return 0;
+}
+
+__u32 cpu = 0;
+__u32 percpu_called = 0;
+__u32 percpu_key = 0;
+__u64 percpu_val = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+		  struct callback_ctx *data)
+{
+	percpu_called++;
+	cpu = bpf_get_smp_processor_id();
+	percpu_key = *key;
+	percpu_val = *val;
+
+	bpf_for_each_map_elem(&hashmap, check_hash_elem, data, 0);
+	return 0;
+}
+
+int called = 0;
+int hashmap_output = 0;
+int percpu_output = 0;
+
+SEC("iter/task")
+int dump_task(struct bpf_iter__task *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct task_struct *task = ctx->task;
+	struct callback_ctx data;
+	int ret;
+
+	/* only call once since we will delete map elements */
+	if (task == (void *)0 || called > 0)
+		return 0;
+
+	called++;
+
+	data.ctx = ctx;
+	data.input = task->tgid;
+	data.output = 0;
+	ret = bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+	if (ret)
+		return 0;
+
+	hashmap_output = data.output;
+
+	data.ctx = 0;
+	data.input = 100;
+	data.output = 0;
+	bpf_for_each_map_elem(&percpu_map, check_percpu_elem, &data, 0);
+	percpu_output = data.output;
+
+	return 0;
+}