diff mbox series

[bpf-next,v2,04/15] bpf: Support new unconditional bswap instruction

Message ID 20230713060739.390659-1-yhs@fb.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf: Support new insns from cpu v4 | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain_full }}
bpf/vmtest-bpf-next-VM_Test-2 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-5 fail Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 fail Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-8 success Logs for veristat
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 27 this patch: 27
netdev/cc_maintainers warning 16 maintainers not CCed: tglx@linutronix.de hpa@zytor.com dsahern@kernel.org mingo@redhat.com kpsingh@kernel.org x86@kernel.org john.fastabend@gmail.com sdf@google.com netdev@vger.kernel.org martin.lau@linux.dev song@kernel.org dave.hansen@linux.intel.com davem@davemloft.net jolsa@kernel.org haoluo@google.com bp@alien8.de
netdev/build_clang fail Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 27 this patch: 27
netdev/checkpatch warning CHECK: No space is necessary after a cast WARNING: labels should not be indented WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yonghong Song July 13, 2023, 6:07 a.m. UTC
The existing 'be' and 'le' insns will do conditional bswap
depending on host endianness. This patch implements
unconditional bswap insns.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 arch/x86/net/bpf_jit_comp.c |  1 +
 kernel/bpf/core.c           | 14 ++++++++++++++
 kernel/bpf/verifier.c       |  2 +-
 3 files changed, 16 insertions(+), 1 deletion(-)

Comments

Eduard Zingerman July 17, 2023, 1:42 a.m. UTC | #1
On Wed, 2023-07-12 at 23:07 -0700, Yonghong Song wrote:
> > The existing 'be' and 'le' insns will do conditional bswap
> > depends on host endianness. This patch implements
> > unconditional bswap insns.
> > 
> > Signed-off-by: Yonghong Song <yhs@fb.com>

Not sure if this is important, but here is an observation:
function is_reg64() has the following code:

 ...
 if (class == BPF_ALU64 || class == BPF_JMP ||
 /* BPF_END always use BPF_ALU class. */
 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
 return true;
 ...

It probably has to be updated but I'm not sure how:
- either check insn->imm == 64 for ALU64 as well;
- or just update the comment, given that instruction always sets all 64-bits.

> > ---
> >  arch/x86/net/bpf_jit_comp.c |  1 +
> >  kernel/bpf/core.c           | 14 ++++++++++++++
> >  kernel/bpf/verifier.c       |  2 +-
> >  3 files changed, 16 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> > index a740a1a6e71d..adda5e7626b4 100644
> > --- a/arch/x86/net/bpf_jit_comp.c
> > +++ b/arch/x86/net/bpf_jit_comp.c
> > @@ -1322,6 +1322,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
> >  			break;
> >  
> >  		case BPF_ALU | BPF_END | BPF_FROM_BE:
> > +		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
> >  			switch (imm32) {
> >  			case 16:
> >  				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
> > diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> > index fe648a158c9e..86bb412fee39 100644
> > --- a/kernel/bpf/core.c
> > +++ b/kernel/bpf/core.c
> > @@ -1524,6 +1524,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
> >  	INSN_3(ALU64, DIV,  X),			\
> >  	INSN_3(ALU64, MOD,  X),			\
> >  	INSN_2(ALU64, NEG),			\
> > +	INSN_3(ALU64, END, TO_LE),		\
> >  	/*   Immediate based. */		\
> >  	INSN_3(ALU64, ADD,  K),			\
> >  	INSN_3(ALU64, SUB,  K),			\
> > @@ -1845,6 +1846,19 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
> >  			break;
> >  		}
> >  		CONT;
> > +	ALU64_END_TO_LE:
> > +		switch (IMM) {
> > +		case 16:
> > +			DST = (__force u16) __swab16(DST);
> > +			break;
> > +		case 32:
> > +			DST = (__force u32) __swab32(DST);
> > +			break;
> > +		case 64:
> > +			DST = (__force u64) __swab64(DST);
> > +			break;
> > +		}
> > +		CONT;
> >  
> >  	/* CALL */
> >  	JMP_CALL:
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 5fee9f24cb5e..22ba0744547b 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -13036,7 +13036,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
> >  		} else {
> >  			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
> >  			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
> > -			    BPF_CLASS(insn->code) == BPF_ALU64) {
> > +			    (BPF_CLASS(insn->code) == BPF_ALU64 && BPF_SRC(insn->code) != BPF_K)) {
> >  				verbose(env, "BPF_END uses reserved fields\n");
> >  				return -EINVAL;
> >  			}
Yonghong Song July 19, 2023, 1:22 a.m. UTC | #2
On 7/16/23 6:42 PM, Eduard Zingerman wrote:
> On Wed, 2023-07-12 at 23:07 -0700, Yonghong Song wrote:
>>> The existing 'be' and 'le' insns will do conditional bswap
>>> depends on host endianness. This patch implements
>>> unconditional bswap insns.
>>>
>>> Signed-off-by: Yonghong Song <yhs@fb.com>
> 
> Not sure if this is important, but here is an observation:
> function is_reg64() has the following code:
> 
>   ...
>   if (class == BPF_ALU64 || class == BPF_JMP ||
>   /* BPF_END always use BPF_ALU class. */
>   (class == BPF_ALU && op == BPF_END && insn->imm == 64))
>   return true;
>   ...
> 
> It probably has to be updated but I'm not sure how:
> - either check insn->imm == 64 for ALU64 as well;
> - or just update the comment, given that instruction always sets all 64-bits.

I think we can remove insn->imm == 64 from the condition.
But I need to double check.

> 
>>> ---
>>>   arch/x86/net/bpf_jit_comp.c |  1 +
>>>   kernel/bpf/core.c           | 14 ++++++++++++++
>>>   kernel/bpf/verifier.c       |  2 +-
>>>   3 files changed, 16 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
>>> index a740a1a6e71d..adda5e7626b4 100644
>>> --- a/arch/x86/net/bpf_jit_comp.c
>>> +++ b/arch/x86/net/bpf_jit_comp.c
>>> @@ -1322,6 +1322,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
>>>   			break;
>>>   
>>>   		case BPF_ALU | BPF_END | BPF_FROM_BE:
>>> +		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
>>>   			switch (imm32) {
>>>   			case 16:
>>>   				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
>>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>> index fe648a158c9e..86bb412fee39 100644
>>> --- a/kernel/bpf/core.c
>>> +++ b/kernel/bpf/core.c
>>> @@ -1524,6 +1524,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
>>>   	INSN_3(ALU64, DIV,  X),			\
>>>   	INSN_3(ALU64, MOD,  X),			\
>>>   	INSN_2(ALU64, NEG),			\
>>> +	INSN_3(ALU64, END, TO_LE),		\
>>>   	/*   Immediate based. */		\
>>>   	INSN_3(ALU64, ADD,  K),			\
>>>   	INSN_3(ALU64, SUB,  K),			\
>>> @@ -1845,6 +1846,19 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>>>   			break;
>>>   		}
>>>   		CONT;
>>> +	ALU64_END_TO_LE:
>>> +		switch (IMM) {
>>> +		case 16:
>>> +			DST = (__force u16) __swab16(DST);
>>> +			break;
>>> +		case 32:
>>> +			DST = (__force u32) __swab32(DST);
>>> +			break;
>>> +		case 64:
>>> +			DST = (__force u64) __swab64(DST);
>>> +			break;
>>> +		}
>>> +		CONT;
>>>   
>>>   	/* CALL */
>>>   	JMP_CALL:
>>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>>> index 5fee9f24cb5e..22ba0744547b 100644
>>> --- a/kernel/bpf/verifier.c
>>> +++ b/kernel/bpf/verifier.c
>>> @@ -13036,7 +13036,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
>>>   		} else {
>>>   			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
>>>   			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
>>> -			    BPF_CLASS(insn->code) == BPF_ALU64) {
>>> +			    (BPF_CLASS(insn->code) == BPF_ALU64 && BPF_SRC(insn->code) != BPF_K)) {
>>>   				verbose(env, "BPF_END uses reserved fields\n");
>>>   				return -EINVAL;
>>>   			}
>
diff mbox series

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a740a1a6e71d..adda5e7626b4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1322,6 +1322,7 @@  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
 			break;
 
 		case BPF_ALU | BPF_END | BPF_FROM_BE:
+		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
 			switch (imm32) {
 			case 16:
 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index fe648a158c9e..86bb412fee39 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1524,6 +1524,7 @@  EXPORT_SYMBOL_GPL(__bpf_call_base);
 	INSN_3(ALU64, DIV,  X),			\
 	INSN_3(ALU64, MOD,  X),			\
 	INSN_2(ALU64, NEG),			\
+	INSN_3(ALU64, END, TO_LE),		\
 	/*   Immediate based. */		\
 	INSN_3(ALU64, ADD,  K),			\
 	INSN_3(ALU64, SUB,  K),			\
@@ -1845,6 +1846,19 @@  static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 			break;
 		}
 		CONT;
+	ALU64_END_TO_LE:
+		switch (IMM) {
+		case 16:
+			DST = (__force u16) __swab16(DST);
+			break;
+		case 32:
+			DST = (__force u32) __swab32(DST);
+			break;
+		case 64:
+			DST = (__force u64) __swab64(DST);
+			break;
+		}
+		CONT;
 
 	/* CALL */
 	JMP_CALL:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5fee9f24cb5e..22ba0744547b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13036,7 +13036,7 @@  static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		} else {
 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
-			    BPF_CLASS(insn->code) == BPF_ALU64) {
+			    (BPF_CLASS(insn->code) == BPF_ALU64 && BPF_SRC(insn->code) != BPF_K)) {
 				verbose(env, "BPF_END uses reserved fields\n");
 				return -EINVAL;
 			}