@@ -1096,3 +1096,17 @@ DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf4_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf4_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf8_d, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vsext_vf2_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf2_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf2_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32)
@@ -600,5 +600,15 @@ vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd
vmv8r_v 100111 1 ..... 00111 011 ..... 1010111 @r2rd
+# Vector Integer Extension
+vzext_vf2 010010 . ..... 00110 010 ..... 1010111 @r2_vm
+vzext_vf4 010010 . ..... 00100 010 ..... 1010111 @r2_vm
+vzext_vf8 010010 . ..... 00010 010 ..... 1010111 @r2_vm
+vsext_vf2 010010 . ..... 00111 010 ..... 1010111 @r2_vm
+vsext_vf4 010010 . ..... 00101 010 ..... 1010111 @r2_vm
+vsext_vf8 010010 . ..... 00011 010 ..... 1010111 @r2_vm
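+# Note: the 5-bit value in the rs1 position is a sub-opcode selecting the
+# extension variant (e.g. 00110 for vzext.vf2), not a register number.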
+
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
@@ -352,6 +352,31 @@ static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
}
+/*
+ * Check function for vector integer extension instructions.
+ */
+#define VEXT_CHECK_EXT(s, rd, rs2, vm, div) do { \
+ uint32_t from = (1 << (s->sew + 3)) / div; \
+ require(from >= 8 && from <= 64); \
+ require(rd != rs2); \
+ require_align(rd, s->flmul); \
+ require_align(rs2, s->flmul / div); \
+ if ((s->flmul / div) < 1) { \
+ require_noover(rd, s->flmul, rs2, s->flmul / div); \
+ } else { \
+ require_noover_widen(rd, s->flmul, rs2, s->flmul / div); \
+ } \
+ require_vm(vm, rd); \
+} while (0)
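+/*
+ * Worked example (illustrative): vzext.vf4 with SEW = 32, LMUL = 2 reads
+ * source elements at EEW = 32 / 4 = 8 with EMUL = 2 / 4 = 1/2.  The
+ * checks above then require vd aligned to LMUL = 2, vs2 aligned to
+ * EMUL = 1/2 and, since EMUL < 1, no overlap at all between vd and vs2;
+ * for EMUL >= 1 overlap is allowed only in the highest-numbered part of
+ * vd (the widening overlap rule).
+ */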
+
/*
* In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
 * So RVV is also checked in this function.
@@ -3247,3 +3272,92 @@ GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
+
+static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
+{
+ REQUIRE_RVV;
+ VEXT_CHECK_EXT(s, a->rd, a->rs2, a->vm, div);
+ return true;
+}
+
+static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_gvec_3_ptr *fn;
+    TCGLabel *over;
+
+ static gen_helper_gvec_3_ptr * const fns[6][4] = {
+ {
+ NULL, gen_helper_vzext_vf2_h,
+ gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
+ },
+ {
+ NULL, NULL,
+ gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
+ },
+ {
+ NULL, NULL,
+ NULL, gen_helper_vzext_vf8_d
+ },
+ {
+ NULL, gen_helper_vsext_vf2_h,
+ gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
+ },
+ {
+ NULL, NULL,
+ gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
+ },
+ {
+ NULL, NULL,
+ NULL, gen_helper_vsext_vf8_d
+ }
+ };
+
+    fn = fns[seq][s->sew];
+    if (fn == NULL) {
+        return false;
+    }
+
+    /* Create the label only for a valid fn, so it is never left unset. */
+    over = gen_new_label();
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
+
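+    /*
+     * The helper reads vl from env and the VM/LMUL/VTA fields from the
+     * VDATA bits packed into the descriptor; oprsz is unused by these
+     * helpers, so 0 is passed, and maxsz carries vlen / 8.
+     */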
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ vreg_ofs(s, a->rs2), cpu_env, 0,
+ s->vlen / 8, data, fn);
+
+ mark_vs_dirty(s);
+ gen_set_label(over);
+ return true;
+}
+
+/* Vector Integer Extension */
+#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \
+static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
+{ \
+ if (int_ext_check(s, a, DIV)) { \
+ return int_ext_op(s, a, SEQ); \
+ } \
+ return false; \
+}
+
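+/*
+ * DIV is the EEW divisor checked by int_ext_check(); SEQ selects the
+ * row of the fns[][] table in int_ext_op().
+ */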
+GEN_INT_EXT_TRANS(vzext_vf2, 2, 0)
+GEN_INT_EXT_TRANS(vzext_vf4, 4, 1)
+GEN_INT_EXT_TRANS(vzext_vf8, 8, 2)
+GEN_INT_EXT_TRANS(vsext_vf2, 2, 3)
+GEN_INT_EXT_TRANS(vsext_vf4, 4, 4)
+GEN_INT_EXT_TRANS(vsext_vf8, 8, 5)
@@ -4976,3 +4976,43 @@ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb)
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh)
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl)
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq)
+
+/* Vector Integer Extension */
+#define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS2, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_max_elems(desc, sizeof(ETYPE), false); \
+ uint32_t vta = vext_vta(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ continue; \
+ } \
+        *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS2(i)); \
+ } \
+ CLEAR_FN(vd, vta, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
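+
+/*
+ * E.g. vzext_vf2_h widens each uint8_t source element to uint16_t.
+ * For masked ops (vm == 0), elements whose mask bit is clear are
+ * skipped and keep their old destination value; CLEAR_FN then applies
+ * the vta tail policy from vl up to vlmax.
+ */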
+
+GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1, clearh)
+GEN_VEXT_INT_EXT(vzext_vf2_w, uint32_t, uint16_t, H4, H2, clearl)
+GEN_VEXT_INT_EXT(vzext_vf2_d, uint64_t, uint32_t, H8, H4, clearq)
+GEN_VEXT_INT_EXT(vzext_vf4_w, uint32_t, uint8_t, H4, H1, clearl)
+GEN_VEXT_INT_EXT(vzext_vf4_d, uint64_t, uint16_t, H8, H2, clearq)
+GEN_VEXT_INT_EXT(vzext_vf8_d, uint64_t, uint8_t, H8, H1, clearq)
+
+GEN_VEXT_INT_EXT(vsext_vf2_h, int16_t, int8_t, H2, H1, clearh)
+GEN_VEXT_INT_EXT(vsext_vf2_w, int32_t, int16_t, H4, H2, clearl)
+GEN_VEXT_INT_EXT(vsext_vf2_d, int64_t, int32_t, H8, H4, clearq)
+GEN_VEXT_INT_EXT(vsext_vf4_w, int32_t, int8_t, H4, H1, clearl)
+GEN_VEXT_INT_EXT(vsext_vf4_d, int64_t, int16_t, H8, H2, clearq)
+GEN_VEXT_INT_EXT(vsext_vf8_d, int64_t, int8_t, H8, H1, clearq)