@@ -2,6 +2,7 @@
#include <test_progs.h>
#include "percpu_alloc_array.skel.h"
#include "percpu_alloc_cgrp_local_storage.skel.h"
+#include "percpu_alloc_nested_special_fields.skel.h"
static void test_array(void)
{
@@ -107,6 +108,43 @@ static void test_cgrp_local_storage(void)
close(cgroup_fd);
}
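+/* A single BPF_PROG_TEST_RUN of a fentry prog attached to bpf_fentry_test1
+ * causes the kernel to call bpf_fentry_test1 through bpf_fentry_test8, so
+ * the one test_run below fires all three fentry progs in order: allocate
+ * the percpu object, populate it, then summarize it.
+ */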
+static void test_nested_special_fields(void)
+{
+ struct percpu_alloc_nested_special_fields *skel;
+ int err, cgroup_fd, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ cgroup_fd = test__join_cgroup("/percpu_alloc");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
+ return;
+
+ skel = percpu_alloc_nested_special_fields__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_nested_special_fields__open"))
+ goto close_fd;
+
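+ /* nr_cpus must be set before load: .rodata is read-only once the
+ * skeleton is loaded, and the summarizing prog iterates that many CPUs.
+ */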
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_nested_special_fields__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_nested_special_fields__load"))
+ goto destroy_skel;
+
+ err = percpu_alloc_nested_special_fields__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_nested_special_fields__attach"))
+ goto destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run nested_special_fields 1-3");
+ ASSERT_EQ(topts.retval, 0, "test_run nested_special_fields 1-3");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+
+destroy_skel:
+ percpu_alloc_nested_special_fields__destroy(skel);
+close_fd:
+ close(cgroup_fd);
+}
+
void test_percpu_alloc(void)
{
if (test__start_subtest("array"))
@@ -115,4 +153,6 @@ void test_percpu_alloc(void)
test_array_sleepable();
if (test__start_subtest("cgrp_local_storage"))
test_cgrp_local_storage();
+ if (test__start_subtest("nested_special_fields"))
+ test_nested_special_fields();
}
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_nested_special_fields.c b/tools/testing/selftests/bpf/progs/percpu_alloc_nested_special_fields.c
new file mode 100644
@@ -0,0 +1,121 @@
+#include "bpf_experimental.h"
+
+struct foo {
+ long key, data;
+ struct bpf_list_node node;
+};
+
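+/* Percpu value with nested special fields: a list head and the spin
+ * lock protecting it.
+ */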
+struct val_t {
+ long b, c, d;
+ struct bpf_list_head head __contains(foo, node);
+ struct bpf_spin_lock lock;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu *pc;
+};
+
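+/* Each cgroup storage value owns at most one percpu val_t via the
+ * pc kptr.
+ */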
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct elem);
+} cgrp SEC(".maps");
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_cgrp_local_storage_1)
+{
+ struct task_struct *task;
+ struct val_t __percpu *p;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
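+ /* Publish the object: bpf_kptr_xchg() atomically stores the new
+ * pointer in the map value and returns the previous one (if any),
+ * which must be dropped to avoid a leak.
+ */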
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Percpu data collection */
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG(test_cgrp_local_storage_2)
+{
+ struct task_struct *task;
+ struct val_t __percpu *p;
+ struct val_t *v;
+ struct elem *e;
+ struct foo *f;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
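+ /* Write only CPU 0's copy, so the summarizing prog should observe
+ * cpu0_field_d == 2 and sum_field_c == 1.
+ */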
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+
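+ /* The nested-special-fields case under test: push a node onto the
+ * list head embedded in the percpu object, under its spin lock.
+ * Ownership of the node passes to the list on a successful push.
+ */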
+ f = bpf_obj_new(struct foo);
+ if (!f)
+ return 0;
+ bpf_spin_lock(&v->lock);
+ bpf_list_push_back(&v->head, &f->node);
+ bpf_spin_unlock(&v->lock);
+
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+
+/* Summarize percpu data collection */
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG(test_cgrp_local_storage_3)
+{
+ struct task_struct *task;
+ struct val_t __percpu *p;
+ struct val_t *v;
+ struct elem *e;
+ int i;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
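+ /* Fold every CPU's copy into globals the userspace side asserts on. */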
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
Add a test for nested special fields with the following pattern:

  struct val_t {
          long b, c, d;
          struct bpf_list_head head __contains(foo, node);
          struct bpf_spin_lock lock;
  };
  struct map_val_t {
          ...
          struct val_t __percpu *pc;
          ...
  };

That is, the percpu data struct can itself hold a linked list.

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
 .../selftests/bpf/prog_tests/percpu_alloc.c   |  40 ++++++
 .../percpu_alloc_nested_special_fields.c      | 121 ++++++++++++++++++
 2 files changed, 161 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/percpu_alloc_nested_special_fields.c