@@ -622,6 +622,21 @@ DEF_HELPER_6(vwmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vqmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vqmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vqmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vqmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vqmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vqmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vqmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+
DEF_HELPER_6(vmerge_vvm_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmerge_vvm_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmerge_vvm_w, void, ptr, ptr, ptr, ptr, env, i32)
@@ -423,6 +423,13 @@ vwmacc_vx 111101 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccsu_vv 111111 . ..... ..... 010 ..... 1010111 @r_vm
vwmaccsu_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccus_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm
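+# Zvqmac: quad-widening multiply-add. Note these use the OPIVV (funct3
+# 000) and OPIVX (funct3 100) encodings, unlike the vwmacc group above,
+# which sits in OPMVV/OPMVX space.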
+vqmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm
+vqmaccu_vx 111100 . ..... ..... 100 ..... 1010111 @r_vm
+vqmacc_vv 111101 . ..... ..... 000 ..... 1010111 @r_vm
+vqmacc_vx 111101 . ..... ..... 100 ..... 1010111 @r_vm
+vqmaccsu_vv 111111 . ..... ..... 000 ..... 1010111 @r_vm
+vqmaccsu_vx 111111 . ..... ..... 100 ..... 1010111 @r_vm
+vqmaccus_vx 111110 . ..... ..... 100 ..... 1010111 @r_vm
vmv_v_v 010111 1 00000 ..... 000 ..... 1010111 @r2
vmv_v_x 010111 1 00000 ..... 100 ..... 1010111 @r2
vmv_v_i 010111 1 00000 ..... 011 ..... 1010111 @r2
@@ -303,6 +303,33 @@ static uint32_t vreg_ofs(DisasContext *s, int reg)
} \
} while (0)
+/*
+ * Check function for vector instruction with format:
+ * quad-width result and single-width sources (4*SEW = SEW op SEW)
+ *
+ * is_vs1: true if insn[19:15] encodes a vector source register (vs1).
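+ *
+ * The destination has EEW = 4 * SEW and EMUL = 4 * LMUL, so SEW is
+ * limited to 8 or 16 (sew < 2) and LMUL to at most 2 (flmul <= 2),
+ * keeping result elements within ELEN = 64 bits and the register
+ * group within 8 registers.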
+ */
+#define VEXT_CHECK_QSS(s, rd, rs1, rs2, vm, is_vs1) do { \
+ require(s->flmul <= 2); \
+ require(s->sew < 2); \
+ require_align(rd, s->flmul * 4); \
+ require_align(rs2, s->flmul); \
+ require_vm(rd, vm); \
+ if (s->flmul < 1) { \
+ require_noover(rd, s->flmul * 4, rs2, s->flmul); \
+ } else { \
+ require_noover_widen(rd, s->flmul * 4, rs2, s->flmul); \
+ } \
+ if (is_vs1) { \
+ require_align(rs1, s->flmul); \
+ if (s->flmul < 1) { \
+ require_noover(rd, s->flmul * 4, rs1, s->flmul); \
+ } else { \
+ require_noover_widen(rd, s->flmul * 4, rs1, s->flmul); \
+ } \
+ } \
+} while (0)
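+
+/*
+ * Example: LMUL = 2 with SEW = 8 gives a destination EMUL of 8, so rd
+ * must be a multiple of 8, rs1/rs2 multiples of 2, and a source may
+ * overlap only the highest-numbered part of the destination group.
+ */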
+
/*
* Check function for vector instruction with format:
* double-width result and double-width source1 and single-width
@@ -1924,7 +1951,63 @@ GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
-/* Vector Integer Merge and Move Instructions */
+/* Vector Quad-Widening Integer Multiply-Add Instructions (Extension Zvqmac) */
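+/*
+ * These reuse the double-widening OPIVV/OPIVX plumbing (do_opivv_widen,
+ * opivx_trans); only the register checks and the helper tables differ,
+ * since the destination group here spans 4 * LMUL registers instead of
+ * 2 * LMUL.
+ */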
+/* OPIVV with QUAD-WIDEN */
+static bool opivv_quad_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ REQUIRE_RVV;
+ VEXT_CHECK_ISA_ILL(s);
+ VEXT_CHECK_QSS(s, a->rd, a->rs1, a->rs2, a->vm, true);
+ return true;
+}
+
+#define GEN_OPIVV_QUAD_WIDEN_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[2] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h \
+ }; \
+ return do_opivv_widen(s, a, fns[s->sew], CHECK); \
+}
+
+GEN_OPIVV_QUAD_WIDEN_TRANS(vqmaccu_vv, opivv_quad_widen_check)
+GEN_OPIVV_QUAD_WIDEN_TRANS(vqmacc_vv, opivv_quad_widen_check)
+GEN_OPIVV_QUAD_WIDEN_TRANS(vqmaccsu_vv, opivv_quad_widen_check)
+
+/* OPIVX with QUAD-WIDEN */
+static bool opivx_quad_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ REQUIRE_RVV;
+ VEXT_CHECK_ISA_ILL(s);
+ VEXT_CHECK_QSS(s, a->rd, a->rs1, a->rs2, a->vm, false);
+ return true;
+}
+
+static bool do_opivx_quad_widen(DisasContext *s, arg_rmrr *a,
+ gen_helper_opivx *fn)
+{
+ if (opivx_quad_widen_check(s, a)) {
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+ }
+ return false;
+}
+
+#define GEN_OPIVX_QUAD_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+    static gen_helper_opivx * const fns[2] = {                  \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h \
+ }; \
+ return do_opivx_quad_widen(s, a, fns[s->sew]); \
+}
+
+GEN_OPIVX_QUAD_WIDEN_TRANS(vqmaccu_vx)
+GEN_OPIVX_QUAD_WIDEN_TRANS(vqmacc_vx)
+GEN_OPIVX_QUAD_WIDEN_TRANS(vqmaccsu_vx)
+GEN_OPIVX_QUAD_WIDEN_TRANS(vqmaccus_vx)
+
+/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
{
if (vext_check_isa_ill(s) &&
@@ -2127,6 +2127,46 @@ GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq)
+/* Vector Quad-Widening Integer Multiply-Add Instructions */
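+/*
+ * Each QOP_* list is (TD, T1, T2, TX1, TX2): the quad-width destination
+ * type, the two single-width source element types, and the widened types
+ * the sources are extended to before the operation, mirroring the WOP_*
+ * lists used by the double-widening helpers.
+ */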
+#define QOP_UUU_B uint32_t, uint8_t, uint8_t, uint32_t, uint32_t
+#define QOP_UUU_H uint64_t, uint16_t, uint16_t, uint64_t, uint64_t
+#define QOP_SSS_B int32_t, int8_t, int8_t, int32_t, int32_t
+#define QOP_SSS_H int64_t, int16_t, int16_t, int64_t, int64_t
+#define QOP_SUS_B int32_t, uint8_t, int8_t, uint32_t, int32_t
+#define QOP_SUS_H int64_t, uint16_t, int16_t, uint64_t, int64_t
+#define QOP_SSU_B int32_t, int8_t, uint8_t, int32_t, uint32_t
+#define QOP_SSU_H int64_t, int16_t, uint16_t, int64_t, uint64_t
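+
+/*
+ * DO_MACC is the same multiply-accumulate op used by the vwmacc helpers;
+ * the quadrupling comes entirely from the QOP_* types above, which extend
+ * each source element to the quad-width compute type first.
+ */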
+
+RVVCALL(OPIVV3, vqmaccu_vv_b, QOP_UUU_B, H4, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vqmaccu_vv_h, QOP_UUU_H, H8, H2, H2, DO_MACC)
+RVVCALL(OPIVV3, vqmacc_vv_b, QOP_SSS_B, H4, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vqmacc_vv_h, QOP_SSS_H, H8, H2, H2, DO_MACC)
+RVVCALL(OPIVV3, vqmaccsu_vv_b, QOP_SSU_B, H4, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vqmaccsu_vv_h, QOP_SSU_H, H8, H2, H2, DO_MACC)
+GEN_VEXT_VV(vqmaccu_vv_b, 1, 4, clearl)
+GEN_VEXT_VV(vqmaccu_vv_h, 2, 8, clearq)
+GEN_VEXT_VV(vqmacc_vv_b, 1, 4, clearl)
+GEN_VEXT_VV(vqmacc_vv_h, 2, 8, clearq)
+GEN_VEXT_VV(vqmaccsu_vv_b, 1, 4, clearl)
+GEN_VEXT_VV(vqmaccsu_vv_h, 2, 8, clearq)
+
+RVVCALL(OPIVX3, vqmaccu_vx_b, QOP_UUU_B, H4, H1, DO_MACC)
+RVVCALL(OPIVX3, vqmaccu_vx_h, QOP_UUU_H, H8, H2, DO_MACC)
+RVVCALL(OPIVX3, vqmacc_vx_b, QOP_SSS_B, H4, H1, DO_MACC)
+RVVCALL(OPIVX3, vqmacc_vx_h, QOP_SSS_H, H8, H2, DO_MACC)
+RVVCALL(OPIVX3, vqmaccsu_vx_b, QOP_SSU_B, H4, H1, DO_MACC)
+RVVCALL(OPIVX3, vqmaccsu_vx_h, QOP_SSU_H, H8, H2, DO_MACC)
+RVVCALL(OPIVX3, vqmaccus_vx_b, QOP_SUS_B, H4, H1, DO_MACC)
+RVVCALL(OPIVX3, vqmaccus_vx_h, QOP_SUS_H, H8, H2, DO_MACC)
+GEN_VEXT_VX(vqmaccu_vx_b, 1, 4, clearl)
+GEN_VEXT_VX(vqmaccu_vx_h, 2, 8, clearq)
+GEN_VEXT_VX(vqmacc_vx_b, 1, 4, clearl)
+GEN_VEXT_VX(vqmacc_vx_h, 2, 8, clearq)
+GEN_VEXT_VX(vqmaccsu_vx_b, 1, 4, clearl)
+GEN_VEXT_VX(vqmaccsu_vx_h, 2, 8, clearq)
+GEN_VEXT_VX(vqmaccus_vx_b, 1, 4, clearl)
+GEN_VEXT_VX(vqmaccus_vx_h, 2, 8, clearq)
+
/* Vector Integer Merge and Move Instructions */
#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN) \
void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \