[bpf-next,v3,06/11] bpf: add arraymap support for bpf_for_each_map_elem() helper

Message ID: 20210225073315.4121184-1-yhs@fb.com
State: Superseded
Delegated to: BPF
Series: bpf: add bpf_for_each_map_elem() helper

Checks

Context                          Check    Description
netdev/cover_letter              success
netdev/fixes_present             success
netdev/patch_count               success
netdev/tree_selection            success  Clearly marked for bpf-next
netdev/subject_prefix            success
netdev/cc_maintainers            warning  6 maintainers not CCed: netdev@vger.kernel.org kpsingh@kernel.org songliubraving@fb.com kafai@fb.com john.fastabend@gmail.com andrii@kernel.org
netdev/source_inline             success  Was 0 now: 0
netdev/verify_signedoff          success
netdev/module_param              success  Was 0 now: 0
netdev/build_32bit               success  Errors and warnings before: 4 this patch: 4
netdev/kdoc                      success  Errors and warnings before: 0 this patch: 0
netdev/verify_fixes              success
netdev/checkpatch                fail     CHECK: Alignment should match open parenthesis; ERROR: that open brace { should be on the previous line
netdev/build_allmodconfig_warn   success  Errors and warnings before: 4 this patch: 4
netdev/header_inline             success
netdev/stable                    success  Stable not CCed

Commit Message

Yonghong Song Feb. 25, 2021, 7:33 a.m. UTC
This patch adds bpf_for_each_map_elem() support for arraymap and percpu arraymap.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 kernel/bpf/arraymap.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
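
For reference, the helper being wired up for arrays here is introduced
earlier in this series; its shape and callback contract, paraphrased from
the final mainline uapi kdoc (not quoted from this patch), look like:

long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
			   void *callback_ctx, u64 flags);

/* Expected callback signature for array maps: */
long (*callback_fn)(struct bpf_map *map, const void *key,
		    void *value, void *ctx);

/*
 * flags must currently be 0; the helper returns the number of map
 * elements traversed on success, or -EINVAL for invalid flags.
 */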

Comments

Andrii Nakryiko Feb. 25, 2021, 10:48 p.m. UTC | #1
On Thu, Feb 25, 2021 at 1:35 AM Yonghong Song <yhs@fb.com> wrote:
>
> This patch adds bpf_for_each_map_elem() support for arraymap and percpu arraymap.
>
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---

index_mask is overcautious in this case, but otherwise lgtm

Acked-by: Andrii Nakryiko <andrii@kernel.org>

>  kernel/bpf/arraymap.c | 40 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 40 insertions(+)
>
> diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
> index 1f8453343bf2..4077a8ae7089 100644
> --- a/kernel/bpf/arraymap.c
> +++ b/kernel/bpf/arraymap.c
> @@ -625,6 +625,42 @@ static const struct bpf_iter_seq_info iter_seq_info = {
>         .seq_priv_size          = sizeof(struct bpf_iter_seq_array_map_info),
>  };
>
> +static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
> +                                  void *callback_ctx, u64 flags)
> +{
> +       u32 i, index, num_elems = 0;
> +       struct bpf_array *array;
> +       bool is_percpu;
> +       u64 ret = 0;
> +       void *val;
> +
> +       if (flags != 0)
> +               return -EINVAL;
> +
> +       is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
> +       array = container_of(map, struct bpf_array, map);
> +       if (is_percpu)
> +               migrate_disable();
> +       for (i = 0; i < map->max_entries; i++) {
> +               index = i & array->index_mask;

I don't think you need to use index_mask here, given you control i and
know that it will always be < map->max_entries.

> +               if (is_percpu)
> +                       val = this_cpu_ptr(array->pptrs[i]);
> +               else
> +                       val = array->value + array->elem_size * i;
> +               num_elems++;
> +               ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
> +                                       (u64)(long)&index, (u64)(long)val,
> +                                       (u64)(long)callback_ctx, 0);
> +               /* return value: 0 - continue, 1 - stop and return */
> +               if (ret)
> +                       break;
> +       }
> +
> +       if (is_percpu)
> +               migrate_enable();
> +       return num_elems;
> +}
> +
>  static int array_map_btf_id;
>  const struct bpf_map_ops array_map_ops = {
>         .map_meta_equal = array_map_meta_equal,
> @@ -643,6 +679,8 @@ const struct bpf_map_ops array_map_ops = {
>         .map_check_btf = array_map_check_btf,
>         .map_lookup_batch = generic_map_lookup_batch,
>         .map_update_batch = generic_map_update_batch,
> +       .map_set_for_each_callback_args = map_set_for_each_callback_args,
> +       .map_for_each_callback = bpf_for_each_array_elem,
>         .map_btf_name = "bpf_array",
>         .map_btf_id = &array_map_btf_id,
>         .iter_seq_info = &iter_seq_info,
> @@ -660,6 +698,8 @@ const struct bpf_map_ops percpu_array_map_ops = {
>         .map_delete_elem = array_map_delete_elem,
>         .map_seq_show_elem = percpu_array_map_seq_show_elem,
>         .map_check_btf = array_map_check_btf,
> +       .map_set_for_each_callback_args = map_set_for_each_callback_args,
> +       .map_for_each_callback = bpf_for_each_array_elem,
>         .map_btf_name = "bpf_array",
>         .map_btf_id = &percpu_array_map_btf_id,
>         .iter_seq_info = &iter_seq_info,
> --
> 2.24.1
>
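
For context, a minimal sketch of how a BPF program might drive this
helper over an arraymap, modeled loosely on the selftests added later in
this series; the map name, callback name, and attach point here are
illustrative assumptions, not taken verbatim from the series:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 32);
	__type(key, u32);
	__type(value, u64);
} arraymap SEC(".maps");

struct callback_ctx {
	u64 output;
};

/* Called once per element; return 0 to continue, 1 to stop iterating. */
static u64 check_elem(struct bpf_map *map, u32 *key, u64 *val,
		      struct callback_ctx *data)
{
	data->output += *val;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int test_for_each(void *ctx)
{
	struct callback_ctx data = { .output = 0 };

	/* Returns the number of elements traversed (num_elems above). */
	bpf_for_each_map_elem(&arraymap, check_elem, &data, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";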

Patch

diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1f8453343bf2..4077a8ae7089 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -625,6 +625,42 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 };
 
+static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+				   void *callback_ctx, u64 flags)
+{
+	u32 i, index, num_elems = 0;
+	struct bpf_array *array;
+	bool is_percpu;
+	u64 ret = 0;
+	void *val;
+
+	if (flags != 0)
+		return -EINVAL;
+
+	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+	array = container_of(map, struct bpf_array, map);
+	if (is_percpu)
+		migrate_disable();
+	for (i = 0; i < map->max_entries; i++) {
+		index = i & array->index_mask;
+		if (is_percpu)
+			val = this_cpu_ptr(array->pptrs[i]);
+		else
+			val = array->value + array->elem_size * i;
+		num_elems++;
+		ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
+					(u64)(long)&index, (u64)(long)val,
+					(u64)(long)callback_ctx, 0);
+		/* return value: 0 - continue, 1 - stop and return */
+		if (ret)
+			break;
+	}
+
+	if (is_percpu)
+		migrate_enable();
+	return num_elems;
+}
+
 static int array_map_btf_id;
 const struct bpf_map_ops array_map_ops = {
 	.map_meta_equal = array_map_meta_equal,
@@ -643,6 +679,8 @@ const struct bpf_map_ops array_map_ops = {
 	.map_check_btf = array_map_check_btf,
 	.map_lookup_batch = generic_map_lookup_batch,
 	.map_update_batch = generic_map_update_batch,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_array_elem,
 	.map_btf_name = "bpf_array",
 	.map_btf_id = &array_map_btf_id,
 	.iter_seq_info = &iter_seq_info,
@@ -660,6 +698,8 @@ const struct bpf_map_ops percpu_array_map_ops = {
 	.map_delete_elem = array_map_delete_elem,
 	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 	.map_check_btf = array_map_check_btf,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_array_elem,
 	.map_btf_name = "bpf_array",
 	.map_btf_id = &percpu_array_map_btf_id,
 	.iter_seq_info = &iter_seq_info,
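
The percpu variant uses the same callback shape; because the kernel-side
loop reads each slot with this_cpu_ptr() under migrate_disable(), the
callback sees (and can update) only the current CPU's copy of each value.
A hypothetical sketch under the same illustrative assumptions as above:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 32);
	__type(key, u32);
	__type(value, u64);
} percpu_map SEC(".maps");

/* *val points at this CPU's slot for the element at *key. */
static u64 bump_elem(struct bpf_map *map, u32 *key, u64 *val, void *ctx)
{
	(*val)++;
	return 0;	/* 0 - continue, 1 - stop and return */
}

SEC("tp/syscalls/sys_enter_getpid")
int test_percpu_for_each(void *ctx)
{
	/* callback_ctx may be NULL when the callback needs no state */
	bpf_for_each_map_elem(&percpu_map, bump_elem, NULL, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";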