[v8,09/14] target/riscv: rvk: add support for sha512-related instructions for RV32 in zknh extension

Message ID 20220301115828.355-10-liweiwei@iscas.ac.cn (mailing list archive)
State New, archived
Series support subsets of scalar crypto extension

Commit Message

Weiwei Li March 1, 2022, 11:58 a.m. UTC
- add sha512sum0r, sha512sig0l, sha512sum1r, sha512sig1l, sha512sig0h and sha512sig1h instructions

Co-authored-by: Zewen Ye <lustrew@foxmail.com>
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
 target/riscv/insn32.decode              |   6 ++
 target/riscv/insn_trans/trans_rvk.c.inc | 100 ++++++++++++++++++++++++
 2 files changed, 106 insertions(+)
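
Each of these instructions produces 32 bits of one of the 64-bit SHA-512 Sum/sigma functions, with the 64-bit operand split across the rs1/rs2 register pair (RV32 has no 64-bit GPRs). As a reference point, here is a plain-C model of the sig0 pair, following the formulas in the Zknh chapter of the riscv-crypto spec; the ref_* names are made up for illustration:

#include <stdint.h>

/* sha512sig0l/sha512sig0h compute the low/high 32 bits of the 64-bit
 * sigma0(x) = ROTR(x,1) ^ ROTR(x,8) ^ SHR(x,7), with the operand
 * split across the rs1/rs2 register pair. */
static uint32_t ref_sha512sig0l(uint32_t rs1, uint32_t rs2)
{
    return (rs1 >> 1) ^ (rs1 >> 7) ^ (rs1 >> 8) ^
           (rs2 << 31) ^ (rs2 << 25) ^ (rs2 << 24);
}

static uint32_t ref_sha512sig0h(uint32_t rs1, uint32_t rs2)
{
    /* Same as sig0l minus the (rs2 << 25) term: the logical shift
     * by 7 zero-fills, so no rs2 bits cross over for that term. */
    return (rs1 >> 1) ^ (rs1 >> 7) ^ (rs1 >> 8) ^
           (rs2 << 31) ^ (rs2 << 24);
}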

Comments

Richard Henderson March 1, 2022, 6:24 p.m. UTC | #1
On 3/1/22 01:58, Weiwei Li wrote:
>   - add sha512sum0r, sha512sig0l, sha512sum1r, sha512sig1l, sha512sig0h and sha512sig1h instructions
> 
> Co-authored-by: Zewen Ye <lustrew@foxmail.com>
> Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
> Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
> ---
>   target/riscv/insn32.decode              |   6 ++
>   target/riscv/insn_trans/trans_rvk.c.inc | 100 ++++++++++++++++++++++++
>   2 files changed, 106 insertions(+)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~
Alistair Francis March 11, 2022, 7:14 a.m. UTC | #2
On Tue, Mar 1, 2022 at 10:01 PM Weiwei Li <liweiwei@iscas.ac.cn> wrote:
>
>  - add sha512sum0r, sha512sig0l, sha512sum1r, sha512sig1l, sha512sig0h and sha512sig1h instructions
>
> Co-authored-by: Zewen Ye <lustrew@foxmail.com>
> Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
> Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

Patch

diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index db28ecdd2b..02a0c71890 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -862,3 +862,9 @@ sha256sig0  00 01000 00010 ..... 001 ..... 0010011 @r2
 sha256sig1  00 01000 00011 ..... 001 ..... 0010011 @r2
 sha256sum0  00 01000 00000 ..... 001 ..... 0010011 @r2
 sha256sum1  00 01000 00001 ..... 001 ..... 0010011 @r2
+sha512sum0r 01 01000 ..... ..... 000 ..... 0110011 @r
+sha512sum1r 01 01001 ..... ..... 000 ..... 0110011 @r
+sha512sig0l 01 01010 ..... ..... 000 ..... 0110011 @r
+sha512sig0h 01 01110 ..... ..... 000 ..... 0110011 @r
+sha512sig1l 01 01011 ..... ..... 000 ..... 0110011 @r
+sha512sig1h 01 01111 ..... ..... 000 ..... 0110011 @r
diff --git a/target/riscv/insn_trans/trans_rvk.c.inc b/target/riscv/insn_trans/trans_rvk.c.inc
index beea7f8e96..bb89a53f52 100644
--- a/target/riscv/insn_trans/trans_rvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvk.c.inc
@@ -167,3 +167,103 @@ static bool trans_sha256sum1(DisasContext *ctx, arg_sha256sum1 *a)
     REQUIRE_ZKNH(ctx);
     return gen_sha256(ctx, a, EXT_NONE, tcg_gen_rotri_i32, 6, 11, 25);
 }
+
+static bool gen_sha512_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext,
+                            void (*func1)(TCGv_i64, TCGv_i64, int64_t),
+                            void (*func2)(TCGv_i64, TCGv_i64, int64_t),
+                            int64_t num1, int64_t num2, int64_t num3)
+{
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, ext);
+    TCGv src2 = get_gpr(ctx, a->rs2, ext);
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+
+    tcg_gen_concat_tl_i64(t0, src1, src2);
+    func1(t1, t0, num1);
+    func2(t2, t0, num2);
+    tcg_gen_xor_i64(t1, t1, t2);
+    tcg_gen_rotri_i64(t2, t0, num3);
+    tcg_gen_xor_i64(t1, t1, t2);
+    tcg_gen_trunc_i64_tl(dest, t1);
+
+    gen_set_gpr(ctx, a->rd, dest);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    return true;
+}
+
+static bool trans_sha512sum0r(DisasContext *ctx, arg_sha512sum0r *a)
+{
+    REQUIRE_32BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64,
+                           tcg_gen_rotli_i64, 25, 30, 28);
+}
+
+static bool trans_sha512sum1r(DisasContext *ctx, arg_sha512sum1r *a)
+{
+    REQUIRE_32BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64,
+                           tcg_gen_rotri_i64, 23, 14, 18);
+}
+
+static bool trans_sha512sig0l(DisasContext *ctx, arg_sha512sig0l *a)
+{
+    REQUIRE_32BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotri_i64,
+                           tcg_gen_rotri_i64, 1, 7, 8);
+}
+
+static bool trans_sha512sig1l(DisasContext *ctx, arg_sha512sig1l *a)
+{
+    REQUIRE_32BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64,
+                           tcg_gen_rotri_i64, 3, 6, 19);
+}
+
+static bool gen_sha512h_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext,
+                             void (*func)(TCGv_i64, TCGv_i64, int64_t),
+                             int64_t num1, int64_t num2, int64_t num3)
+{
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, ext);
+    TCGv src2 = get_gpr(ctx, a->rs2, ext);
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+
+    tcg_gen_concat_tl_i64(t0, src1, src2);
+    func(t1, t0, num1);
+    tcg_gen_ext32u_i64(t2, t0);
+    tcg_gen_shri_i64(t2, t2, num2);
+    tcg_gen_xor_i64(t1, t1, t2);
+    tcg_gen_rotri_i64(t2, t0, num3);
+    tcg_gen_xor_i64(t1, t1, t2);
+    tcg_gen_trunc_i64_tl(dest, t1);
+
+    gen_set_gpr(ctx, a->rd, dest);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    return true;
+}
+
+static bool trans_sha512sig0h(DisasContext *ctx, arg_sha512sig0h *a)
+{
+    REQUIRE_32BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 1, 7, 8);
+}
+
+static bool trans_sha512sig1h(DisasContext *ctx, arg_sha512sig1h *a)
+{
+    REQUIRE_32BIT(ctx);
+    REQUIRE_ZKNH(ctx);
+    return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, 3, 6, 19);
+}
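
A note on the translation strategy above: tcg_gen_concat_tl_i64() puts rs1 in the low half and rs2 in the high half of a 64-bit temporary, the 64-bit Sum/sigma function is evaluated on it, and tcg_gen_trunc_i64_tl() keeps the low 32 bits. sum0r/sum1r are pure rotates, and for the low-half sig0l/sig1l instructions a rotate can also stand in for the spec's logical shift, since the shift's zero-filled bits never land in the low word; only the high-half sig0h/sig1h helper must model the shift exactly, which is what the ext32u + shri pair does. A plain-C sketch of one instruction from each helper (the model_* names are hypothetical; the constants are the ones passed above):

#include <stdint.h>

static uint64_t rotl64(uint64_t x, unsigned n) { return (x << n) | (x >> (64 - n)); }
static uint64_t rotr64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

/* gen_sha512_rv32 path, sha512sum0r: ROTL 25 == ROTR 39 and
 * ROTL 30 == ROTR 34, so this is the low word of SHA-512
 * Sum0(x) = ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39). */
static uint32_t model_sha512sum0r(uint32_t rs1, uint32_t rs2)
{
    uint64_t x = ((uint64_t)rs2 << 32) | rs1;   /* concat_tl_i64 */
    return (uint32_t)(rotl64(x, 25) ^ rotl64(x, 30) ^ rotr64(x, 28));
}

/* gen_sha512h_rv32 path, sha512sig0h: sigma0's middle term is a
 * logical shift, so the low word is zero-extended (ext32u) and
 * shifted instead of rotating the full 64-bit value. */
static uint32_t model_sha512sig0h(uint32_t rs1, uint32_t rs2)
{
    uint64_t x = ((uint64_t)rs2 << 32) | rs1;
    return (uint32_t)(rotr64(x, 1) ^ ((x & 0xffffffffull) >> 7) ^ rotr64(x, 8));
}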