Message ID | 20230919035839.3297328-4-pulehui@huaweicloud.com (mailing list archive) |
---|---|
State | Handled Elsewhere, archived |
Headers | show |
Series | Zbb support and code simplification for RV64 JIT | expand |
Context | Check | Description |
---|---|---|
conchuod/cover_letter | success | Series has a cover letter |
conchuod/tree_selection | success | Guessed tree name to be for-next at HEAD 0bb80ecc33a8 |
conchuod/fixes_present | success | Fixes tag not required for -next series |
conchuod/maintainers_pattern | success | MAINTAINERS pattern errors before the patch: 5 and now 5 |
conchuod/verify_signedoff | success | Signed-off-by tag matches author and committer |
conchuod/kdoc | success | Errors and warnings before: 0 this patch: 0 |
conchuod/build_rv64_clang_allmodconfig | success | Errors and warnings before: 9 this patch: 9 |
conchuod/module_param | success | Was 0 now: 0 |
conchuod/build_rv64_gcc_allmodconfig | success | Errors and warnings before: 9 this patch: 9 |
conchuod/build_rv32_defconfig | success | Build OK |
conchuod/dtb_warn_rv64 | success | Errors and warnings before: 25 this patch: 25 |
conchuod/header_inline | success | No static functions without inline keyword in header files |
conchuod/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 118 lines checked |
conchuod/build_rv64_nommu_k210_defconfig | success | Build OK |
conchuod/verify_fixes | success | No Fixes tag |
conchuod/build_rv64_nommu_virt_defconfig | success | Build OK |
Pu Lehui <pulehui@huaweicloud.com> writes: > From: Pu Lehui <pulehui@huawei.com> > > There are many extension helpers in the current branch instructions, and > the implementation is a bit complicated. We simplify this logic through > two simple extension helpers with alternate register. > > Signed-off-by: Pu Lehui <pulehui@huawei.com> > --- > arch/riscv/net/bpf_jit_comp64.c | 82 +++++++++++++-------------------- > 1 file changed, 31 insertions(+), 51 deletions(-) > > diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c > index 4a649e195..0c6ffe11a 100644 > --- a/arch/riscv/net/bpf_jit_comp64.c > +++ b/arch/riscv/net/bpf_jit_comp64.c > @@ -141,6 +141,19 @@ static bool in_auipc_jalr_range(s64 val) > val < ((1L << 31) - (1L << 11)); > } > > +/* Modify rd pointer to alternate reg to avoid corrupting original reg */ > +static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx) > +{ > + emit_sextw(ra, *rd, ctx); > + *rd = ra; > +} > + > +static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx) > +{ > + emit_zextw(ra, *rd, ctx); > + *rd = ra; > +} > + > /* Emit fixed-length instructions for address */ > static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) > { > @@ -395,38 +408,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, > *rs = bpf_to_rv_reg(insn->src_reg, ctx); > } > > -static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) > -{ > - emit_mv(RV_REG_T2, *rd, ctx); > - emit_zextw(RV_REG_T2, RV_REG_T2, ctx); > - emit_mv(RV_REG_T1, *rs, ctx); > - emit_zextw(RV_REG_T1, RV_REG_T1, ctx); > - *rd = RV_REG_T2; > - *rs = RV_REG_T1; > -} > - > -static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) > -{ > - emit_sextw(RV_REG_T2, *rd, ctx); > - emit_sextw(RV_REG_T1, *rs, ctx); > - *rd = RV_REG_T2; > - *rs = RV_REG_T1; > -} > - > -static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) > -{ > - emit_mv(RV_REG_T2, *rd, ctx); > - 
emit_zextw(RV_REG_T2, RV_REG_T2, ctx); > - emit_zextw(RV_REG_T1, RV_REG_T2, ctx); > - *rd = RV_REG_T2; > -} > - > -static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) > -{ > - emit_sextw(RV_REG_T2, *rd, ctx); > - *rd = RV_REG_T2; > -} > - > static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr, > struct rv_jit_context *ctx) > { > @@ -1372,22 +1353,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, > rvoff = rv_offset(i, off, ctx); > if (!is64) { > s = ctx->ninsns; > - if (is_signed_bpf_cond(BPF_OP(code))) > - emit_sext_32_rd_rs(&rd, &rs, ctx); > - else > - emit_zext_32_rd_rs(&rd, &rs, ctx); > + if (is_signed_bpf_cond(BPF_OP(code))) { > + emit_sextw_alt(&rs, RV_REG_T1, ctx); > + emit_sextw_alt(&rd, RV_REG_T2, ctx); > + } else { > + emit_zextw_alt(&rs, RV_REG_T1, ctx); > + emit_zextw_alt(&rd, RV_REG_T2, ctx); > + } > e = ctx->ninsns; > - Please avoid changes like this. > /* Adjust for extra insns */ > rvoff -= ninsns_rvoff(e - s); > } > - Dito. > if (BPF_OP(code) == BPF_JSET) { > /* Adjust for and */ > rvoff -= 4; > emit_and(RV_REG_T1, rd, rs, ctx); > - emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, > - ctx); > + emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); > } else { > emit_branch(BPF_OP(code), rd, rs, rvoff, ctx); > } > @@ -1416,21 +1397,20 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, > case BPF_JMP32 | BPF_JSLE | BPF_K: > rvoff = rv_offset(i, off, ctx); > s = ctx->ninsns; > - if (imm) { > + if (imm) > emit_imm(RV_REG_T1, imm, ctx); > - rs = RV_REG_T1; > - } else { > - /* If imm is 0, simply use zero register. */ > - rs = RV_REG_ZERO; > - } > + rs = imm ? 
RV_REG_T1 : RV_REG_ZERO; > if (!is64) { > - if (is_signed_bpf_cond(BPF_OP(code))) > - emit_sext_32_rd(&rd, ctx); > - else > - emit_zext_32_rd_t1(&rd, ctx); > + if (is_signed_bpf_cond(BPF_OP(code))) { > + emit_sextw_alt(&rd, RV_REG_T2, ctx); > + /* rs has been sign extended */ > + } else { > + emit_zextw_alt(&rd, RV_REG_T2, ctx); > + if (imm) > + emit_zextw(rs, rs, ctx); > + } > } > e = ctx->ninsns; > - Ditto. Other than the formatting changes, it looks good! Björn
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 4a649e195..0c6ffe11a 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -141,6 +141,19 @@ static bool in_auipc_jalr_range(s64 val) val < ((1L << 31) - (1L << 11)); } +/* Modify rd pointer to alternate reg to avoid corrupting original reg */ +static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx) +{ + emit_sextw(ra, *rd, ctx); + *rd = ra; +} + +static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx) +{ + emit_zextw(ra, *rd, ctx); + *rd = ra; +} + /* Emit fixed-length instructions for address */ static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) { @@ -395,38 +408,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, *rs = bpf_to_rv_reg(insn->src_reg, ctx); } -static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) -{ - emit_mv(RV_REG_T2, *rd, ctx); - emit_zextw(RV_REG_T2, RV_REG_T2, ctx); - emit_mv(RV_REG_T1, *rs, ctx); - emit_zextw(RV_REG_T1, RV_REG_T1, ctx); - *rd = RV_REG_T2; - *rs = RV_REG_T1; -} - -static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) -{ - emit_sextw(RV_REG_T2, *rd, ctx); - emit_sextw(RV_REG_T1, *rs, ctx); - *rd = RV_REG_T2; - *rs = RV_REG_T1; -} - -static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) -{ - emit_mv(RV_REG_T2, *rd, ctx); - emit_zextw(RV_REG_T2, RV_REG_T2, ctx); - emit_zextw(RV_REG_T1, RV_REG_T2, ctx); - *rd = RV_REG_T2; -} - -static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) -{ - emit_sextw(RV_REG_T2, *rd, ctx); - *rd = RV_REG_T2; -} - static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr, struct rv_jit_context *ctx) { @@ -1372,22 +1353,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, rvoff = rv_offset(i, off, ctx); if (!is64) { s = ctx->ninsns; - if (is_signed_bpf_cond(BPF_OP(code))) - emit_sext_32_rd_rs(&rd, &rs, ctx); - 
else - emit_zext_32_rd_rs(&rd, &rs, ctx); + if (is_signed_bpf_cond(BPF_OP(code))) { + emit_sextw_alt(&rs, RV_REG_T1, ctx); + emit_sextw_alt(&rd, RV_REG_T2, ctx); + } else { + emit_zextw_alt(&rs, RV_REG_T1, ctx); + emit_zextw_alt(&rd, RV_REG_T2, ctx); + } e = ctx->ninsns; - /* Adjust for extra insns */ rvoff -= ninsns_rvoff(e - s); } - if (BPF_OP(code) == BPF_JSET) { /* Adjust for and */ rvoff -= 4; emit_and(RV_REG_T1, rd, rs, ctx); - emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, - ctx); + emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); } else { emit_branch(BPF_OP(code), rd, rs, rvoff, ctx); } @@ -1416,21 +1397,20 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_JMP32 | BPF_JSLE | BPF_K: rvoff = rv_offset(i, off, ctx); s = ctx->ninsns; - if (imm) { + if (imm) emit_imm(RV_REG_T1, imm, ctx); - rs = RV_REG_T1; - } else { - /* If imm is 0, simply use zero register. */ - rs = RV_REG_ZERO; - } + rs = imm ? RV_REG_T1 : RV_REG_ZERO; if (!is64) { - if (is_signed_bpf_cond(BPF_OP(code))) - emit_sext_32_rd(&rd, ctx); - else - emit_zext_32_rd_t1(&rd, ctx); + if (is_signed_bpf_cond(BPF_OP(code))) { + emit_sextw_alt(&rd, RV_REG_T2, ctx); + /* rs has been sign extended */ + } else { + emit_zextw_alt(&rd, RV_REG_T2, ctx); + if (imm) + emit_zextw(rs, rs, ctx); + } } e = ctx->ninsns; - /* Adjust for extra insns */ rvoff -= ninsns_rvoff(e - s); emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);