
[v2,bpf-next,3/4] bpf: inline bpf_map_lookup_elem() for PERCPU_ARRAY maps

Message ID: 20240402021307.1012571-4-andrii@kernel.org (mailing list archive)
State: Accepted
Commit: db69718b8efac802c7cc20d5a6c7dfc913f99c43
Delegated to: BPF
Series: Add internal-only BPF per-CPU instruction

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 948 this patch: 948
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 9 maintainers not CCed: john.fastabend@gmail.com sdf@google.com kpsingh@kernel.org martin.lau@linux.dev yonghong.song@linux.dev haoluo@google.com jolsa@kernel.org song@kernel.org eddyz87@gmail.com
netdev/build_clang success Errors and warnings before: 955 this patch: 955
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 959 this patch: 959
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc

Commit Message

Andrii Nakryiko April 2, 2024, 2:13 a.m. UTC
Using the new per-CPU BPF instruction, implement inlining of the per-CPU
ARRAY map lookup helper when BPF JIT support is present.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 kernel/bpf/arraymap.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
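
For context, the helper being inlined boils down to the following C logic (a
simplified sketch of percpu_array_map_lookup_elem() as it appears in the diff
below; the standalone function form and name shown here are illustrative):

	static void *lookup_sketch(struct bpf_map *map, void *key)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		u32 index = *(u32 *)key;

		if (index >= map->max_entries)
			return NULL;
		/* index_mask clamps the index as a Spectre v1 mitigation */
		return this_cpu_ptr(array->pptrs[index & array->index_mask]);
	}

The generated instruction sequence in the patch reproduces exactly this:
bounds check, optional masking, pointer arithmetic into the pptrs array, and
conversion of the per-CPU allocator pointer into a pointer for the current
CPU via the new BPF_MOV64_PERCPU_REG instruction.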

Comments

John Fastabend April 2, 2024, 5:02 a.m. UTC | #1
Andrii Nakryiko wrote:
> Using the new per-CPU BPF instruction, implement inlining of the per-CPU
> ARRAY map lookup helper when BPF JIT support is present.
> 
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  kernel/bpf/arraymap.c | 33 +++++++++++++++++++++++++++++++++
>  1 file changed, 33 insertions(+)
> 
> diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
> index 13358675ff2e..8c1e6d7654bb 100644
> --- a/kernel/bpf/arraymap.c
> +++ b/kernel/bpf/arraymap.c
> @@ -246,6 +246,38 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
>  	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
>  }
>  
> +/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
> +static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
> +{
> +	struct bpf_array *array = container_of(map, struct bpf_array, map);
> +	struct bpf_insn *insn = insn_buf;

Nit: if you wanted to be consistent with array_*_map_gen_lookup, you could use:

	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

But I think it's easier to read as is.

> +
> +	if (!bpf_jit_supports_percpu_insn())
> +		return -EOPNOTSUPP;
> +
> +	if (map->map_flags & BPF_F_INNER_MAP)
> +		return -EOPNOTSUPP;
> +
> +	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
> +	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
> +
> +	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
> +	if (!map->bypass_spec_v1) {
> +		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
> +		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
> +	} else {
> +		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
> +	}
> +
> +	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
> +	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
> +	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
> +	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
> +	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
> +	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
> +	return insn - insn_buf;
> +}
> +
>  static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
>  {
>  	struct bpf_array *array = container_of(map, struct bpf_array, map);
> @@ -776,6 +808,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
>  	.map_free = array_map_free,
>  	.map_get_next_key = array_map_get_next_key,
>  	.map_lookup_elem = percpu_array_map_lookup_elem,
> +	.map_gen_lookup = percpu_array_map_gen_lookup,
>  	.map_update_elem = array_map_update_elem,
>  	.map_delete_elem = array_map_delete_elem,
>  	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,

Acked-by: John Fastabend <john.fastabend@gmail.com>
Andrii Nakryiko April 2, 2024, 4:20 p.m. UTC | #2
On Mon, Apr 1, 2024 at 10:02 PM John Fastabend <john.fastabend@gmail.com> wrote:
>
> Andrii Nakryiko wrote:
> > Using the new per-CPU BPF instruction, implement inlining of the per-CPU
> > ARRAY map lookup helper when BPF JIT support is present.
> >
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
> >  kernel/bpf/arraymap.c | 33 +++++++++++++++++++++++++++++++++
> >  1 file changed, 33 insertions(+)
> >
> > diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
> > index 13358675ff2e..8c1e6d7654bb 100644
> > --- a/kernel/bpf/arraymap.c
> > +++ b/kernel/bpf/arraymap.c
> > @@ -246,6 +246,38 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
> >       return this_cpu_ptr(array->pptrs[index & array->index_mask]);
> >  }
> >
> > +/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
> > +static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
> > +{
> > +     struct bpf_array *array = container_of(map, struct bpf_array, map);
> > +     struct bpf_insn *insn = insn_buf;
>
> Nit: if you wanted to be consistent with array_*_map_gen_lookup, you could use:
>

I didn't in this case; I found these "aliases" more confusing than helpful.

>         const int ret = BPF_REG_0;
>         const int map_ptr = BPF_REG_1;
>         const int index = BPF_REG_2;
>
> But I think it's easier to read as is.
>

Yep, that's what I thought as well.


> > +
> > +     if (!bpf_jit_supports_percpu_insn())
> > +             return -EOPNOTSUPP;
> > +
> > +     if (map->map_flags & BPF_F_INNER_MAP)
> > +             return -EOPNOTSUPP;
> > +
> > +     BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
> > +     *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
> > +
> > +     *insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
> > +     if (!map->bypass_spec_v1) {
> > +             *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
> > +             *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
> > +     } else {
> > +             *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
> > +     }
> > +
> > +     *insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
> > +     *insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
> > +     *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
> > +     *insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
> > +     *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
> > +     *insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
> > +     return insn - insn_buf;
> > +}
> > +
> >  static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
> >  {
> >       struct bpf_array *array = container_of(map, struct bpf_array, map);
> > @@ -776,6 +808,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
> >       .map_free = array_map_free,
> >       .map_get_next_key = array_map_get_next_key,
> >       .map_lookup_elem = percpu_array_map_lookup_elem,
> > +     .map_gen_lookup = percpu_array_map_gen_lookup,
> >       .map_update_elem = array_map_update_elem,
> >       .map_delete_elem = array_map_delete_elem,
> >       .map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
>
> Acked-by: John Fastabend <john.fastabend@gmail.com>
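
For context on how this hook is consumed: during the verifier's fixup pass, a
bpf_map_lookup_elem() helper call is replaced with the instruction sequence
returned by the map's ->map_gen_lookup() callback; when the callback returns a
negative error (as this patch does with -EOPNOTSUPP if the JIT lacks per-CPU
instruction support, or for BPF_F_INNER_MAP maps), the out-of-line helper call
is kept. A rough sketch of that dispatch (simplified, not the literal verifier
code; patch_insns() stands in for the real instruction-patching helper):

	/* simplified sketch of the verifier-side dispatch, not literal code */
	if (insn->imm == BPF_FUNC_map_lookup_elem && map->ops->map_gen_lookup) {
		cnt = map->ops->map_gen_lookup(map, insn_buf);
		if (cnt > 0)
			/* splice the inline sequence over the call site */
			patch_insns(prog, insn_idx, insn_buf, cnt);
		/* else: keep calling the helper out of line */
	}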

Patch

diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 13358675ff2e..8c1e6d7654bb 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -246,6 +246,38 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
+/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
+static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_insn *insn = insn_buf;
+
+	if (!bpf_jit_supports_percpu_insn())
+		return -EOPNOTSUPP;
+
+	if (map->map_flags & BPF_F_INNER_MAP)
+		return -EOPNOTSUPP;
+
+	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
+
+	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
+	if (!map->bypass_spec_v1) {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
+	} else {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
+	}
+
+	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
+	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
+	return insn - insn_buf;
+}
+
 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -776,6 +808,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
 	.map_free = array_map_free,
 	.map_get_next_key = array_map_get_next_key,
 	.map_lookup_elem = percpu_array_map_lookup_elem,
+	.map_gen_lookup = percpu_array_map_gen_lookup,
 	.map_update_elem = array_map_update_elem,
 	.map_delete_elem = array_map_delete_elem,
 	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
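
For readers who don't have the instruction macros memorized, here is the
emitted sequence again with annotations mapping each instruction back to the
C logic (the comments are editorial, not part of the patch; jump offsets are
shown for the !bypass_spec_v1 case):

	/* r1 holds the map pointer, which is also &array because map sits at
	 * offset 0 of struct bpf_array (enforced by the BUILD_BUG_ON above);
	 * advance it to &array->pptrs[0]
	 */
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));
	/* r0 = *(u32 *)key; r2 holds the key pointer on entry */
	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	/* if (index >= max_entries) goto out_null; the jump offset (6 vs 5)
	 * accounts for whether the masking instruction is emitted
	 */
	*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
	/* index &= index_mask; Spectre v1 mitigation */
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	/* r0 <<= 3; scale the index by sizeof(void *) */
	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	/* r0 += r1; r0 = &array->pptrs[index] */
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	/* r0 = *(u64 *)r0; load the per-CPU allocator pointer */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	/* r0 = this_cpu_ptr(r0); the new internal-only per-CPU instruction */
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	/* goto done; skip over the NULL return */
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	/* out_null: r0 = NULL */
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	/* done: r0 is either the per-CPU element pointer or NULL */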