@@ -1145,3 +1145,4 @@ DEF_HELPER_FLAGS_3(sm4ks, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
/* Vector crypto functions */
DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
@@ -911,3 +911,4 @@ czero_nez 0000111 ..... ..... 111 ..... 0110011 @r
# *** RV64 Zvkb vector crypto extension ***
vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
+vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
@@ -38,3 +38,59 @@ static bool vclmul_vv_check(DisasContext *s, arg_rmrr *a)
}
GEN_VV_MASKED_TRANS(vclmul_vv, vclmul_vv_check)
+
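+/*
+ * Translate a masked vector-scalar (.vx) instruction NAME via its
+ * out-of-line helper: build pointers to vd, v0 and vs2 in the vector
+ * register file, fetch rs1 as a zero-extended GPR value, and pass a
+ * simd_desc() descriptor carrying vm, LMUL and the tail/mask agnostic
+ * policy bits.  The helper call is skipped entirely when vstart >= vl.
+ */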
+#define GEN_VX_MASKED_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ TCGv_ptr rd_v, v0_v, rs2_v; \
+ TCGv rs1; \
+ TCGv_i32 desc; \
+ uint32_t data = 0; \
+ \
+ TCGLabel *over = gen_new_label(); \
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
+ \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
+ \
+ rd_v = tcg_temp_new_ptr(); \
+ v0_v = tcg_temp_new_ptr(); \
+ rs1 = get_gpr(s, a->rs1, EXT_ZERO); \
+ rs2_v = tcg_temp_new_ptr(); \
+ desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, \
+ s->cfg_ptr->vlen / 8, data)); \
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
+ tcg_gen_addi_ptr(v0_v, cpu_env, vreg_ofs(s, 0)); \
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
+ gen_helper_##NAME(rd_v, v0_v, rs1, rs2_v, cpu_env, desc); \
+ \
+ mark_vs_dirty(s); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+static bool zvkb_vx_check(DisasContext *s, arg_rmrr *a)
+{
+ return opivx_check(s, a) && s->cfg_ptr->ext_zvkb == true;
+}
+
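+/* vclmul.vx only operates on 64-bit elements, hence the SEW == 64 check. */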
+static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
+{
+ return zvkb_vx_check(s, a) && s->sew == MO_64;
+}
+
+GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
@@ -40,3 +40,9 @@ static uint64_t clmul64(uint64_t y, uint64_t x)
RVVCALL(OPIVV2, vclmul_vv, OP_UUU_D, H8, H8, H8, clmul64)
GEN_VEXT_VV(vclmul_vv, 8)
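+/*
+ * vclmul.vx: carry-less multiply of each 64-bit element of vs2 by the
+ * rs1 scalar; each result is the low 64 bits of the 128-bit product.
+ */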
+RVVCALL(OPIVX2, vclmul_vx, OP_UUU_D, H8, H8, clmul64)
+GEN_VEXT_VX(vclmul_vx, 8)