diff mbox series

[bpf-next,v2,6/6] riscv, bpf: Optimize bswap insns with Zbb support

Message ID 20230919035839.3297328-7-pulehui@huaweicloud.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Headers show
Series Zbb support and code simplification for RV64 JIT | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 9 this patch: 9
netdev/cc_maintainers warning 4 maintainers not CCed: aou@eecs.berkeley.edu xi.wang@gmail.com yonghong.song@linux.dev paul.walmsley@sifive.com
netdev/build_clang success Errors and warnings before: 9 this patch: 9
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 9 this patch: 9
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 143 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-1 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for veristat
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for test_maps on s390x with gcc

Commit Message

Pu Lehui Sept. 19, 2023, 3:58 a.m. UTC
From: Pu Lehui <pulehui@huawei.com>

Optimize bswap instructions by the rev8 Zbb instruction combined with the srli
instruction. And optimize 16-bit zero-extension with Zbb support.

Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 arch/riscv/net/bpf_jit.h        | 67 +++++++++++++++++++++++++++++++++
 arch/riscv/net/bpf_jit_comp64.c | 50 +-----------------------
 2 files changed, 69 insertions(+), 48 deletions(-)

Comments

Björn Töpel Sept. 28, 2023, 11:08 a.m. UTC | #1
Pu Lehui <pulehui@huaweicloud.com> writes:

> From: Pu Lehui <pulehui@huawei.com>
>
> Optimize bswap instructions by rev8 Zbb instruction conbined with srli
> instruction. And Optimize 16-bit zero-extension with Zbb support.
>
> Signed-off-by: Pu Lehui <pulehui@huawei.com>
> ---
>  arch/riscv/net/bpf_jit.h        | 67 +++++++++++++++++++++++++++++++++
>  arch/riscv/net/bpf_jit_comp64.c | 50 +-----------------------
>  2 files changed, 69 insertions(+), 48 deletions(-)
>
> diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
> index 944bdd6e4..a04eed672 100644
> --- a/arch/riscv/net/bpf_jit.h
> +++ b/arch/riscv/net/bpf_jit.h
> @@ -1135,12 +1135,79 @@ static inline void emit_sextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
>  	emit_addiw(rd, rs, 0, ctx);
>  }
>  
> +static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
> +{
> +	if (rvzbb_enabled()) {
> +		emit(rvzbb_zexth(rd, rs), ctx);
> +	} else {
> +		emit_slli(rd, rs, 48, ctx);
> +		emit_srli(rd, rd, 48, ctx);
> +	}
> +}
> +

Prefer early-exit.

>  static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
>  {
>  	emit_slli(rd, rs, 32, ctx);
>  	emit_srli(rd, rd, 32, ctx);
>  }
>  
> +static inline void emit_bswap(u8 rd, s32 imm, struct rv_jit_context *ctx)
> +{
> +	if (rvzbb_enabled()) {
> +		int bits = 64 - imm;
> +
> +		emit(rvzbb_rev8(rd, rd), ctx);
> +		if (bits)
> +			emit_srli(rd, rd, bits, ctx);
> +	} else {
> +		emit_li(RV_REG_T2, 0, ctx);
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +		if (imm == 16)
> +			goto out_be;
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +		if (imm == 32)
> +			goto out_be;
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
> +		emit_srli(rd, rd, 8, ctx);
> +out_be:
> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
> +
> +		emit_mv(rd, RV_REG_T2, ctx);
> +	}
> +}

Definitely early-exit for this one!

This function really showcases why ZBB is nice! ;-)

I'll take the next rev of series for a test!


Björn
Pu Lehui Jan. 15, 2024, 12:26 p.m. UTC | #2
On 2023/9/28 19:08, Björn Töpel wrote:
> Pu Lehui <pulehui@huaweicloud.com> writes:
> 
>> From: Pu Lehui <pulehui@huawei.com>
>>
>> Optimize bswap instructions by rev8 Zbb instruction conbined with srli
>> instruction. And Optimize 16-bit zero-extension with Zbb support.
>>
>> Signed-off-by: Pu Lehui <pulehui@huawei.com>
>> ---
>>   arch/riscv/net/bpf_jit.h        | 67 +++++++++++++++++++++++++++++++++
>>   arch/riscv/net/bpf_jit_comp64.c | 50 +-----------------------
>>   2 files changed, 69 insertions(+), 48 deletions(-)
>>
>> diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
>> index 944bdd6e4..a04eed672 100644
>> --- a/arch/riscv/net/bpf_jit.h
>> +++ b/arch/riscv/net/bpf_jit.h
>> @@ -1135,12 +1135,79 @@ static inline void emit_sextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
>>   	emit_addiw(rd, rs, 0, ctx);
>>   }
>>   
>> +static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
>> +{
>> +	if (rvzbb_enabled()) {
>> +		emit(rvzbb_zexth(rd, rs), ctx);
>> +	} else {
>> +		emit_slli(rd, rs, 48, ctx);
>> +		emit_srli(rd, rd, 48, ctx);
>> +	}
>> +}
>> +
> 
> Prefer early-exit.
> 
>>   static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
>>   {
>>   	emit_slli(rd, rs, 32, ctx);
>>   	emit_srli(rd, rd, 32, ctx);
>>   }
>>   
>> +static inline void emit_bswap(u8 rd, s32 imm, struct rv_jit_context *ctx)
>> +{
>> +	if (rvzbb_enabled()) {
>> +		int bits = 64 - imm;
>> +
>> +		emit(rvzbb_rev8(rd, rd), ctx);
>> +		if (bits)
>> +			emit_srli(rd, rd, bits, ctx);
>> +	} else {
>> +		emit_li(RV_REG_T2, 0, ctx);
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +		if (imm == 16)
>> +			goto out_be;
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +		if (imm == 32)
>> +			goto out_be;
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
>> +		emit_srli(rd, rd, 8, ctx);
>> +out_be:
>> +		emit_andi(RV_REG_T1, rd, 0xff, ctx);
>> +		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
>> +
>> +		emit_mv(rd, RV_REG_T2, ctx);
>> +	}
>> +}
> 
> Definitely early-exit for this one!
> 
> This function really show-cases why ZBB is nice! ;-)
> 
> I'll take the next rev of series for a test!
> 

Okay, the relevant modifications will be presented in v3 and will be 
sent soon.

> 
> Björn
diff mbox series

Patch

diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 944bdd6e4..a04eed672 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -1135,12 +1135,79 @@  static inline void emit_sextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
 	emit_addiw(rd, rs, 0, ctx);
 }
 
+static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+	if (rvzbb_enabled()) {
+		emit(rvzbb_zexth(rd, rs), ctx);
+	} else {
+		emit_slli(rd, rs, 48, ctx);
+		emit_srli(rd, rd, 48, ctx);
+	}
+}
+
 static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
 {
 	emit_slli(rd, rs, 32, ctx);
 	emit_srli(rd, rd, 32, ctx);
 }
 
+static inline void emit_bswap(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+	if (rvzbb_enabled()) {
+		int bits = 64 - imm;
+
+		emit(rvzbb_rev8(rd, rd), ctx);
+		if (bits)
+			emit_srli(rd, rd, bits, ctx);
+	} else {
+		emit_li(RV_REG_T2, 0, ctx);
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+		if (imm == 16)
+			goto out_be;
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+		if (imm == 32)
+			goto out_be;
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+		emit_srli(rd, rd, 8, ctx);
+out_be:
+		emit_andi(RV_REG_T1, rd, 0xff, ctx);
+		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+
+		emit_mv(rd, RV_REG_T2, ctx);
+	}
+}
+
 #endif /* __riscv_xlen == 64 */
 
 void bpf_jit_build_prologue(struct rv_jit_context *ctx);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index f4ca6b787..35753b142 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1130,8 +1130,7 @@  int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_ALU | BPF_END | BPF_FROM_LE:
 		switch (imm) {
 		case 16:
-			emit_slli(rd, rd, 48, ctx);
-			emit_srli(rd, rd, 48, ctx);
+			emit_zexth(rd, rd, ctx);
 			break;
 		case 32:
 			if (!aux->verifier_zext)
@@ -1142,54 +1141,9 @@  int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 			break;
 		}
 		break;
-
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
 	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
-		emit_li(RV_REG_T2, 0, ctx);
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-		if (imm == 16)
-			goto out_be;
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-		if (imm == 32)
-			goto out_be;
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
-		emit_srli(rd, rd, 8, ctx);
-out_be:
-		emit_andi(RV_REG_T1, rd, 0xff, ctx);
-		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-
-		emit_mv(rd, RV_REG_T2, ctx);
+		emit_bswap(rd, imm, ctx);
 		break;
 
 	/* dst = imm */