| Message ID | 20231222113102.4148-3-laoar.shao@gmail.com (mailing list archive) |
|---|---|
| State | Changes Requested |
| Delegated to: | BPF |
| Headers | show |
| Series | bpf: Add bpf_iter_cpumask \| expand |
On Fri, Dec 22, 2023 at 3:31 AM Yafang Shao <laoar.shao@gmail.com> wrote: > > Add three new kfuncs for bpf_iter_cpumask. > - bpf_iter_cpumask_new > - bpf_iter_cpumask_next > - bpf_iter_cpumask_destroy > > These new kfuncs facilitate the iteration of percpu data, such as > runqueues, psi_cgroup_cpu, and more. > > Signed-off-by: Yafang Shao <laoar.shao@gmail.com> > --- > kernel/bpf/cpumask.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 48 insertions(+) > > diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c > index 2e73533..4ae07a4 100644 > --- a/kernel/bpf/cpumask.c > +++ b/kernel/bpf/cpumask.c > @@ -422,6 +422,51 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask) > return cpumask_weight(cpumask); > } > > +struct bpf_iter_cpumask { > + __u64 __opaque[2]; > +} __aligned(8); > + > +struct bpf_iter_cpumask_kern { > + struct cpumask *mask; > + int *cpu; > +} __aligned(8); > + > +__bpf_kfunc u32 bpf_iter_cpumask_new(struct bpf_iter_cpumask *it, struct cpumask *mask) > +{ > + struct bpf_iter_cpumask_kern *kit = (void *)it; > + > + kit->cpu = bpf_mem_alloc(&bpf_global_ma, sizeof(*kit->cpu)); why dynamic memory allocation of 4 bytes?... just have `int cpu;` field in bpf_iter_cpumask_kern? 
> + if (!kit->cpu) > + return -ENOMEM; > + > + kit->mask = mask; > + *kit->cpu = -1; > + return 0; > +} > + > +__bpf_kfunc int *bpf_iter_cpumask_next(struct bpf_iter_cpumask *it) > +{ > + struct bpf_iter_cpumask_kern *kit = (void *)it; > + struct cpumask *mask = kit->mask; > + int cpu; > + > + cpu = cpumask_next(*kit->cpu, mask); > + if (cpu >= nr_cpu_ids) > + return NULL; > + > + *kit->cpu = cpu; > + return kit->cpu; > +} > + > +__bpf_kfunc void bpf_iter_cpumask_destroy(struct bpf_iter_cpumask *it) > +{ > + struct bpf_iter_cpumask_kern *kit = (void *)it; > + > + if (!kit->cpu) > + return; > + bpf_mem_free(&bpf_global_ma, kit->cpu); > +} > + > __bpf_kfunc_end_defs(); > > BTF_SET8_START(cpumask_kfunc_btf_ids) > @@ -450,6 +495,9 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask) > BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU) > BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU) > BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU) > +BTF_ID_FLAGS(func, bpf_iter_cpumask_new, KF_ITER_NEW | KF_RCU) > +BTF_ID_FLAGS(func, bpf_iter_cpumask_next, KF_ITER_NEXT | KF_RET_NULL | KF_RCU) > +BTF_ID_FLAGS(func, bpf_iter_cpumask_destroy, KF_ITER_DESTROY) > BTF_SET8_END(cpumask_kfunc_btf_ids) > > static const struct btf_kfunc_id_set cpumask_kfunc_set = { > -- > 1.8.3.1 >
On Wed, Jan 3, 2024 at 6:13 AM Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote: > > On Fri, Dec 22, 2023 at 3:31 AM Yafang Shao <laoar.shao@gmail.com> wrote: > > > > Add three new kfuncs for bpf_iter_cpumask. > > - bpf_iter_cpumask_new > > - bpf_iter_cpumask_next > > - bpf_iter_cpumask_destroy > > > > These new kfuncs facilitate the iteration of percpu data, such as > > runqueues, psi_cgroup_cpu, and more. > > > > Signed-off-by: Yafang Shao <laoar.shao@gmail.com> > > --- > > kernel/bpf/cpumask.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ > > 1 file changed, 48 insertions(+) > > > > diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c > > index 2e73533..4ae07a4 100644 > > --- a/kernel/bpf/cpumask.c > > +++ b/kernel/bpf/cpumask.c > > @@ -422,6 +422,51 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask) > > return cpumask_weight(cpumask); > > } > > > > +struct bpf_iter_cpumask { > > + __u64 __opaque[2]; > > +} __aligned(8); > > + > > +struct bpf_iter_cpumask_kern { > > + struct cpumask *mask; > > + int *cpu; > > +} __aligned(8); > > + > > +__bpf_kfunc u32 bpf_iter_cpumask_new(struct bpf_iter_cpumask *it, struct cpumask *mask) > > +{ > > + struct bpf_iter_cpumask_kern *kit = (void *)it; > > + > > + kit->cpu = bpf_mem_alloc(&bpf_global_ma, sizeof(*kit->cpu)); > > why dynamic memory allocation of 4 bytes?... just have `int cpu;` > field in bpf_iter_cpumask_kern? Will do it. Thanks for your suggestion.
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 2e73533..4ae07a4 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -422,6 +422,51 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask) return cpumask_weight(cpumask); } +struct bpf_iter_cpumask { + __u64 __opaque[2]; +} __aligned(8); + +struct bpf_iter_cpumask_kern { + struct cpumask *mask; + int *cpu; +} __aligned(8); + +__bpf_kfunc u32 bpf_iter_cpumask_new(struct bpf_iter_cpumask *it, struct cpumask *mask) +{ + struct bpf_iter_cpumask_kern *kit = (void *)it; + + kit->cpu = bpf_mem_alloc(&bpf_global_ma, sizeof(*kit->cpu)); + if (!kit->cpu) + return -ENOMEM; + + kit->mask = mask; + *kit->cpu = -1; + return 0; +} + +__bpf_kfunc int *bpf_iter_cpumask_next(struct bpf_iter_cpumask *it) +{ + struct bpf_iter_cpumask_kern *kit = (void *)it; + struct cpumask *mask = kit->mask; + int cpu; + + cpu = cpumask_next(*kit->cpu, mask); + if (cpu >= nr_cpu_ids) + return NULL; + + *kit->cpu = cpu; + return kit->cpu; +} + +__bpf_kfunc void bpf_iter_cpumask_destroy(struct bpf_iter_cpumask *it) +{ + struct bpf_iter_cpumask_kern *kit = (void *)it; + + if (!kit->cpu) + return; + bpf_mem_free(&bpf_global_ma, kit->cpu); +} + __bpf_kfunc_end_defs(); BTF_SET8_START(cpumask_kfunc_btf_ids) @@ -450,6 +495,9 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask) BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU) +BTF_ID_FLAGS(func, bpf_iter_cpumask_new, KF_ITER_NEW | KF_RCU) +BTF_ID_FLAGS(func, bpf_iter_cpumask_next, KF_ITER_NEXT | KF_RET_NULL | KF_RCU) +BTF_ID_FLAGS(func, bpf_iter_cpumask_destroy, KF_ITER_DESTROY) BTF_SET8_END(cpumask_kfunc_btf_ids) static const struct btf_kfunc_id_set cpumask_kfunc_set = {
Add three new kfuncs for bpf_iter_cpumask. - bpf_iter_cpumask_new - bpf_iter_cpumask_next - bpf_iter_cpumask_destroy These new kfuncs facilitate the iteration of percpu data, such as runqueues, psi_cgroup_cpu, and more. Signed-off-by: Yafang Shao <laoar.shao@gmail.com> --- kernel/bpf/cpumask.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+)