Message ID | 20220318041944.19859-11-liweiwei@iscas.ac.cn (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | support subsets of scalar crypto extension | expand |
On Fri, Mar 18, 2022 at 2:26 PM Weiwei Li <liweiwei@iscas.ac.cn> wrote: > > - add sha512sum0, sha512sig0, sha512sum1 and sha512sig1 instructions > > Co-authored-by: Zewen Ye <lustrew@foxmail.com> > Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn> > Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn> > Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Alistair Francis <alistair.francis@wdc.com> Alistair > --- > target/riscv/insn32.decode | 5 +++ > target/riscv/insn_trans/trans_rvk.c.inc | 53 +++++++++++++++++++++++++ > 2 files changed, 58 insertions(+) > > diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode > index 02a0c71890..d9ebb138d1 100644 > --- a/target/riscv/insn32.decode > +++ b/target/riscv/insn32.decode > @@ -868,3 +868,8 @@ sha512sig0l 01 01010 ..... ..... 000 ..... 0110011 @r > sha512sig0h 01 01110 ..... ..... 000 ..... 0110011 @r > sha512sig1l 01 01011 ..... ..... 000 ..... 0110011 @r > sha512sig1h 01 01111 ..... ..... 000 ..... 0110011 @r > +# *** RV64 Zknh Standard Extension *** > +sha512sig0 00 01000 00110 ..... 001 ..... 0010011 @r2 > +sha512sig1 00 01000 00111 ..... 001 ..... 0010011 @r2 > +sha512sum0 00 01000 00100 ..... 001 ..... 0010011 @r2 > +sha512sum1 00 01000 00101 ..... 001 ..... 0010011 @r2
> diff --git a/target/riscv/insn_trans/trans_rvk.c.inc b/target/riscv/insn_trans/trans_rvk.c.inc > index bb89a53f52..b1ce4f27cf 100644 > --- a/target/riscv/insn_trans/trans_rvk.c.inc > +++ b/target/riscv/insn_trans/trans_rvk.c.inc > @@ -267,3 +267,56 @@ static bool trans_sha512sig1h(DisasContext *ctx, arg_sha512sig1h *a) > REQUIRE_ZKNH(ctx); > return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, 3, 6, 19); > } > + > +static bool gen_sha512_rv64(DisasContext *ctx, arg_r2 *a, DisasExtend ext, > + void (*func)(TCGv_i64, TCGv_i64, int64_t), > + int64_t num1, int64_t num2, int64_t num3) > +{ > + TCGv dest = dest_gpr(ctx, a->rd); > + TCGv src1 = get_gpr(ctx, a->rs1, ext); > + TCGv_i64 t0 = tcg_temp_new_i64(); > + TCGv_i64 t1 = tcg_temp_new_i64(); > + TCGv_i64 t2 = tcg_temp_new_i64(); > + > + tcg_gen_extu_tl_i64(t0, src1); > + tcg_gen_rotri_i64(t1, t0, num1); > + tcg_gen_rotri_i64(t2, t0, num2); > + tcg_gen_xor_i64(t1, t1, t2); > + func(t2, t0, num3); > + tcg_gen_xor_i64(t1, t1, t2); > + tcg_gen_trunc_i64_tl(dest, t1); > + > + gen_set_gpr(ctx, a->rd, dest); > + tcg_temp_free_i64(t0); > + tcg_temp_free_i64(t1); > + tcg_temp_free_i64(t2); > + return true; > +} > + > +static bool trans_sha512sig0(DisasContext *ctx, arg_sha512sig0 *a) > +{ > + REQUIRE_64BIT(ctx); > + REQUIRE_ZKNH(ctx); > + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 1, 8, 7); > +} > + > +static bool trans_sha512sig1(DisasContext *ctx, arg_sha512sig1 *a) > +{ > + REQUIRE_64BIT(ctx); > + REQUIRE_ZKNH(ctx); > + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 19, 61, 6); > +} > + > +static bool trans_sha512sum0(DisasContext *ctx, arg_sha512sum0 *a) > +{ > + REQUIRE_64BIT(ctx); > + REQUIRE_ZKNH(ctx); > + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 28, 34, 39); > +} > + > +static bool trans_sha512sum1(DisasContext *ctx, arg_sha512sum1 *a) > +{ > + REQUIRE_64BIT(ctx); > + REQUIRE_ZKNH(ctx); > + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 14, 18, 41);
> +} > -- > 2.17.1 > >
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 02a0c71890..d9ebb138d1 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -868,3 +868,8 @@ sha512sig0l 01 01010 ..... ..... 000 ..... 0110011 @r sha512sig0h 01 01110 ..... ..... 000 ..... 0110011 @r sha512sig1l 01 01011 ..... ..... 000 ..... 0110011 @r sha512sig1h 01 01111 ..... ..... 000 ..... 0110011 @r +# *** RV64 Zknh Standard Extension *** +sha512sig0 00 01000 00110 ..... 001 ..... 0010011 @r2 +sha512sig1 00 01000 00111 ..... 001 ..... 0010011 @r2 +sha512sum0 00 01000 00100 ..... 001 ..... 0010011 @r2 +sha512sum1 00 01000 00101 ..... 001 ..... 0010011 @r2 diff --git a/target/riscv/insn_trans/trans_rvk.c.inc b/target/riscv/insn_trans/trans_rvk.c.inc index bb89a53f52..b1ce4f27cf 100644 --- a/target/riscv/insn_trans/trans_rvk.c.inc +++ b/target/riscv/insn_trans/trans_rvk.c.inc @@ -267,3 +267,56 @@ static bool trans_sha512sig1h(DisasContext *ctx, arg_sha512sig1h *a) REQUIRE_ZKNH(ctx); return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, 3, 6, 19); } + +static bool gen_sha512_rv64(DisasContext *ctx, arg_r2 *a, DisasExtend ext, + void (*func)(TCGv_i64, TCGv_i64, int64_t), + int64_t num1, int64_t num2, int64_t num3) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, ext); + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + + tcg_gen_extu_tl_i64(t0, src1); + tcg_gen_rotri_i64(t1, t0, num1); + tcg_gen_rotri_i64(t2, t0, num2); + tcg_gen_xor_i64(t1, t1, t2); + func(t2, t0, num3); + tcg_gen_xor_i64(t1, t1, t2); + tcg_gen_trunc_i64_tl(dest, t1); + + gen_set_gpr(ctx, a->rd, dest); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + return true; +} + +static bool trans_sha512sig0(DisasContext *ctx, arg_sha512sig0 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 1, 8, 7); +} +
+static bool trans_sha512sig1(DisasContext *ctx, arg_sha512sig1 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 19, 61, 6); +} + +static bool trans_sha512sum0(DisasContext *ctx, arg_sha512sum0 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 28, 34, 39); +} + +static bool trans_sha512sum1(DisasContext *ctx, arg_sha512sum1 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZKNH(ctx); + return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 14, 18, 41); +}