
[v2,bpf-next,08/20] bpf: Add x86-64 JIT support for bpf_cast_user instruction.

Message ID 20240209040608.98927-9-alexei.starovoitov@gmail.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Series: bpf: Introduce BPF arena.

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for s390x-gcc / test
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-gcc / test
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-llvm-17 / test
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-llvm-18 / test
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-25 fail Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-llvm-18 / test
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-18 / veristat
netdev/series_format fail Series longer than 15 patches (and no cover letter)
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2389 this patch: 2389
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 21 maintainers not CCed: john.fastabend@gmail.com nathan@kernel.org netdev@vger.kernel.org dsahern@kernel.org mingo@redhat.com morbo@google.com x86@kernel.org kpsingh@kernel.org llvm@lists.linux.dev bp@alien8.de jolsa@kernel.org hpa@zytor.com yonghong.song@linux.dev martin.lau@linux.dev song@kernel.org sdf@google.com dave.hansen@linux.intel.com justinstitt@google.com tglx@linutronix.de ndesaulniers@google.com haoluo@google.com
netdev/build_clang success Errors and warnings before: 1107 this patch: 1107
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2469 this patch: 2469
netdev/checkpatch warning + /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */ WARNING: line length of 100 exceeds 80 columns WARNING: line length of 105 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Alexei Starovoitov Feb. 9, 2024, 4:05 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

LLVM generates bpf_cast_kern and bpf_cast_user instructions while translating
pointers with __attribute__((address_space(1))).

rX = cast_kern(rY) is processed by the verifier and converted to a
normal 32-bit move: wX = wY

bpf_cast_user has to be converted by the JIT.

rX = cast_user(rY) is equivalent to:

aux_reg = upper_32_bits of arena->user_vm_start
aux_reg <<= 32
wX = wY // clear upper 32 bits of dst register
if (wX) // if not zero, add the upper bits of user_vm_start
  wX |= aux_reg

The JIT can do it more efficiently:

mov dst_reg32, src_reg32  // 32-bit move
shl dst_reg, 32
or dst_reg, user_vm_start
rol dst_reg, 32
xor r11, r11
test dst_reg32, dst_reg32 // check if the lower 32 bits are zero
cmove r11, dst_reg	  // if so, set dst_reg to zero
			  // Intel swapped src/dst register encoding in CMOVcc

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 arch/x86/net/bpf_jit_comp.c | 41 ++++++++++++++++++++++++++++++++++++-
 include/linux/filter.h      |  1 +
 kernel/bpf/core.c           |  5 +++++
 3 files changed, 46 insertions(+), 1 deletion(-)
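
In C terms, the emitted sequence computes roughly the following (a
minimal model for illustration only, not kernel code; the function
name cast_user_model() is made up):

#include <stdint.h>

/* Keep the lower 32 bits of the arena pointer, splice in the upper
 * 32 bits of user_vm_start, and map a zero lower half back to NULL,
 * mirroring the shl/or/rol + test/cmove sequence above.
 */
static uint64_t cast_user_model(uint64_t src, uint64_t user_vm_start)
{
	uint32_t lo = (uint32_t)src;	/* wX = wY */

	if (!lo)			/* lower 32 bits zero => NULL */
		return 0;
	return (user_vm_start & 0xffffffff00000000ULL) | lo;
}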

Comments

Eduard Zingerman Feb. 10, 2024, 1:15 a.m. UTC | #1
On Thu, 2024-02-08 at 20:05 -0800, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> LLVM generates bpf_cast_kern and bpf_cast_user instructions while translating
> pointers with __attribute__((address_space(1))).
> 
> rX = cast_kern(rY) is processed by the verifier and converted to
> normal 32-bit move: wX = wY
> 
> bpf_cast_user has to be converted by JIT.
> 
> rX = cast_user(rY) is
> 
> aux_reg = upper_32_bits of arena->user_vm_start
> aux_reg <<= 32
> wX = wY // clear upper 32 bits of dst register
> if (wX) // if not zero add upper bits of user_vm_start
>   wX |= aux_reg
> 
> JIT can do it more efficiently:
> 
> mov dst_reg32, src_reg32  // 32-bit move
> shl dst_reg, 32
> or dst_reg, user_vm_start
> rol dst_reg, 32
> xor r11, r11
> test dst_reg32, dst_reg32 // check if lower 32-bit are zero
> cmove r11, dst_reg	  // if so, set dst_reg to zero
> 			  // Intel swapped src/dst register encoding in CMOVcc
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Checked generated x86 code for all reg combinations, works as expected.
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Kumar Kartikeya Dwivedi Feb. 10, 2024, 8:40 a.m. UTC | #2
On Fri, 9 Feb 2024 at 05:06, Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> From: Alexei Starovoitov <ast@kernel.org>
>
> LLVM generates bpf_cast_kern and bpf_cast_user instructions while translating
> pointers with __attribute__((address_space(1))).
>
> rX = cast_kern(rY) is processed by the verifier and converted to
> normal 32-bit move: wX = wY
>
> bpf_cast_user has to be converted by JIT.
>
> rX = cast_user(rY) is
>
> aux_reg = upper_32_bits of arena->user_vm_start
> aux_reg <<= 32
> wX = wY // clear upper 32 bits of dst register
> if (wX) // if not zero add upper bits of user_vm_start
>   wX |= aux_reg
>

Would this be OK if rY is somehow aligned at a 4GB boundary and the
lower 32 bits end up being zero? Then this transformation would
confuse it with the NULL case, right? Or am I missing something?
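
For concreteness, a made-up illustration of the corner case (not from
the thread):

	uint64_t ry = 0x200000000ULL;	/* 4GB-aligned, lower 32 bits are 0 */
	uint32_t wx = (uint32_t)ry;	/* == 0, same as the NULL case */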

> JIT can do it more efficiently:
>
> mov dst_reg32, src_reg32  // 32-bit move
> shl dst_reg, 32
> or dst_reg, user_vm_start
> rol dst_reg, 32
> xor r11, r11
> test dst_reg32, dst_reg32 // check if lower 32-bit are zero
> cmove r11, dst_reg        // if so, set dst_reg to zero
>                           // Intel swapped src/dst register encoding in CMOVcc
>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  arch/x86/net/bpf_jit_comp.c | 41 ++++++++++++++++++++++++++++++++++++-
>  include/linux/filter.h      |  1 +
>  kernel/bpf/core.c           |  5 +++++
>  3 files changed, 46 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index 883b7f604b9a..a042ed57af7b 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -1272,13 +1272,14 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
>         bool tail_call_seen = false;
>         bool seen_exit = false;
>         u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
> -       u64 arena_vm_start;
> +       u64 arena_vm_start, user_vm_start;
>         int i, excnt = 0;
>         int ilen, proglen = 0;
>         u8 *prog = temp;
>         int err;
>
>         arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
> +       user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
>
>         detect_reg_usage(insn, insn_cnt, callee_regs_used,
>                          &tail_call_seen);
> @@ -1346,6 +1347,39 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
>                         break;
>
>                 case BPF_ALU64 | BPF_MOV | BPF_X:
> +                       if (insn->off == BPF_ARENA_CAST_USER) {
> +                               if (dst_reg != src_reg)
> +                                       /* 32-bit mov */
> +                                       emit_mov_reg(&prog, false, dst_reg, src_reg);
> +                               /* shl dst_reg, 32 */
> +                               maybe_emit_1mod(&prog, dst_reg, true);
> +                               EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
> +
> +                               /* or dst_reg, user_vm_start */
> +                               maybe_emit_1mod(&prog, dst_reg, true);
> +                               if (is_axreg(dst_reg))
> +                                       EMIT1_off32(0x0D,  user_vm_start >> 32);
> +                               else
> +                                       EMIT2_off32(0x81, add_1reg(0xC8, dst_reg),  user_vm_start >> 32);
> +
> +                               /* rol dst_reg, 32 */
> +                               maybe_emit_1mod(&prog, dst_reg, true);
> +                               EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
> +
> +                               /* xor r11, r11 */
> +                               EMIT3(0x4D, 0x31, 0xDB);
> +
> +                               /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
> +                               maybe_emit_mod(&prog, dst_reg, dst_reg, false);
> +                               EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
> +
> +                               /* cmove r11, dst_reg; if so, set dst_reg to zero */
> +                               /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
> +                               maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
> +                               EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
> +                               break;
> +                       }
> +                       fallthrough;
>                 case BPF_ALU | BPF_MOV | BPF_X:
>                         if (insn->off == 0)
>                                 emit_mov_reg(&prog,
> @@ -3424,6 +3458,11 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
>         }
>  }
>
> +bool bpf_jit_supports_arena(void)
> +{
> +       return true;
> +}
> +
>  bool bpf_jit_supports_ptr_xchg(void)
>  {
>         return true;
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index cd76d43412d0..78ea63002531 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -959,6 +959,7 @@ bool bpf_jit_supports_kfunc_call(void);
>  bool bpf_jit_supports_far_kfunc_call(void);
>  bool bpf_jit_supports_exceptions(void);
>  bool bpf_jit_supports_ptr_xchg(void);
> +bool bpf_jit_supports_arena(void);
>  void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
>  bool bpf_helper_changes_pkt_data(void *func);
>
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index 2539d9bfe369..2829077f0461 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -2926,6 +2926,11 @@ bool __weak bpf_jit_supports_far_kfunc_call(void)
>         return false;
>  }
>
> +bool __weak bpf_jit_supports_arena(void)
> +{
> +       return false;
> +}
> +
>  /* Return TRUE if the JIT backend satisfies the following two conditions:
>   * 1) JIT backend supports atomic_xchg() on pointer-sized words.
>   * 2) Under the specific arch, the implementation of xchg() is the same
> --
> 2.34.1
>
Alexei Starovoitov Feb. 13, 2024, 10:28 p.m. UTC | #3
On Sat, Feb 10, 2024 at 12:40 AM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
>
> On Fri, 9 Feb 2024 at 05:06, Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> >
> > From: Alexei Starovoitov <ast@kernel.org>
> >
> > LLVM generates bpf_cast_kern and bpf_cast_user instructions while translating
> > pointers with __attribute__((address_space(1))).
> >
> > rX = cast_kern(rY) is processed by the verifier and converted to
> > normal 32-bit move: wX = wY
> >
> > bpf_cast_user has to be converted by JIT.
> >
> > rX = cast_user(rY) is
> >
> > aux_reg = upper_32_bits of arena->user_vm_start
> > aux_reg <<= 32
> > wX = wY // clear upper 32 bits of dst register
> > if (wX) // if not zero add upper bits of user_vm_start
> >   wX |= aux_reg
> >
>
> Would this be ok if the rY is somehow aligned at the 4GB boundary, and
> the lower 32-bits end up being zero.
> Then this transformation would confuse it with the NULL case, right?

Yes, it will. I tried to fix it by reserving a zero page,
but the end result was bad. See the discussion with Barret.
So we decided to drop this idea.
We might come back to it eventually.
Also, I was thinking of doing
if (rX) instead of if (wX) to mitigate it a bit,
but that is probably wrong too.
The best option is to mitigate this inside the bpf program by never
returning a pointer whose lower 32 bits are zero from the bpf_alloc()
function.
In general, with the latest llvm we see close to zero cast_user
instructions when a bpf prog is not mixing (void *) with
(void __arena *) casts, so it shouldn't be an issue in practice with
the patches as-is.
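
A sketch of that in-program mitigation (illustrative only: bpf_alloc()
below stands in for whatever arena allocator the program uses, and the
retry policy is a placeholder):

/* Never hand out an arena pointer whose lower 32 bits are zero,
 * so cast_user cannot collapse it to NULL.
 */
static void __arena *alloc_nonzero(unsigned long size)
{
	void __arena *p = bpf_alloc(size);

	if (p && !((unsigned long)p & 0xffffffffUL)) {
		/* lo32 == 0: keep this block parked and hand out a
		 * fresh allocation instead.
		 */
		return bpf_alloc(size);
	}
	return p;
}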

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 883b7f604b9a..a042ed57af7b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1272,13 +1272,14 @@  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
 	bool tail_call_seen = false;
 	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
-	u64 arena_vm_start;
+	u64 arena_vm_start, user_vm_start;
 	int i, excnt = 0;
 	int ilen, proglen = 0;
 	u8 *prog = temp;
 	int err;
 
 	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
+	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
 
 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
 			 &tail_call_seen);
@@ -1346,6 +1347,39 @@  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
 			break;
 
 		case BPF_ALU64 | BPF_MOV | BPF_X:
+			if (insn->off == BPF_ARENA_CAST_USER) {
+				if (dst_reg != src_reg)
+					/* 32-bit mov */
+					emit_mov_reg(&prog, false, dst_reg, src_reg);
+				/* shl dst_reg, 32 */
+				maybe_emit_1mod(&prog, dst_reg, true);
+				EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
+
+				/* or dst_reg, user_vm_start */
+				maybe_emit_1mod(&prog, dst_reg, true);
+				if (is_axreg(dst_reg))
+					EMIT1_off32(0x0D,  user_vm_start >> 32);
+				else
+					EMIT2_off32(0x81, add_1reg(0xC8, dst_reg),  user_vm_start >> 32);
+
+				/* rol dst_reg, 32 */
+				maybe_emit_1mod(&prog, dst_reg, true);
+				EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
+
+				/* xor r11, r11 */
+				EMIT3(0x4D, 0x31, 0xDB);
+
+				/* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
+				maybe_emit_mod(&prog, dst_reg, dst_reg, false);
+				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
+
+				/* cmove r11, dst_reg; if so, set dst_reg to zero */
+				/* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
+				maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
+				EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
+				break;
+			}
+			fallthrough;
 		case BPF_ALU | BPF_MOV | BPF_X:
 			if (insn->off == 0)
 				emit_mov_reg(&prog,
@@ -3424,6 +3458,11 @@  void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
 	}
 }
 
+bool bpf_jit_supports_arena(void)
+{
+	return true;
+}
+
 bool bpf_jit_supports_ptr_xchg(void)
 {
 	return true;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index cd76d43412d0..78ea63002531 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -959,6 +959,7 @@  bool bpf_jit_supports_kfunc_call(void);
 bool bpf_jit_supports_far_kfunc_call(void);
 bool bpf_jit_supports_exceptions(void);
 bool bpf_jit_supports_ptr_xchg(void);
+bool bpf_jit_supports_arena(void);
 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
 bool bpf_helper_changes_pkt_data(void *func);
 
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2539d9bfe369..2829077f0461 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2926,6 +2926,11 @@  bool __weak bpf_jit_supports_far_kfunc_call(void)
 	return false;
 }
 
+bool __weak bpf_jit_supports_arena(void)
+{
+	return false;
+}
+
 /* Return TRUE if the JIT backend satisfies the following two conditions:
  * 1) JIT backend supports atomic_xchg() on pointer-sized words.
  * 2) Under the specific arch, the implementation of xchg() is the same