@@ -1113,3 +1113,17 @@ DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf4_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf4_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vzext_vf8_d, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vsext_vf2_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf2_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf2_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32)
@@ -630,5 +630,14 @@ vmv2r_v         100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v         100111 1 ..... 00011 011 ..... 1010111 @r2rd
vmv8r_v         100111 1 ..... 00111 011 ..... 1010111 @r2rd

+# Vector Integer Extension
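+# The 5 bits in the rs1 position encode the operation (vf2/vf4/vf8, zero/sign).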
+vzext_vf2       010010 . ..... 00110 010 ..... 1010111 @r2_vm
+vzext_vf4       010010 . ..... 00100 010 ..... 1010111 @r2_vm
+vzext_vf8       010010 . ..... 00010 010 ..... 1010111 @r2_vm
+vsext_vf2       010010 . ..... 00111 010 ..... 1010111 @r2_vm
+vsext_vf4       010010 . ..... 00101 010 ..... 1010111 @r2_vm
+vsext_vf8       010010 . ..... 00011 010 ..... 1010111 @r2_vm
+
vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
@@ -3514,3 +3514,101 @@ GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
+
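+/*
+ * Vector Integer Extension check: the source element width is
+ * SEW / (2^div), which must still be a legal element width (i.e.
+ * at least 8 bits), and vd/vs2 must satisfy the usual register
+ * group alignment, masking and overlap constraints.
+ */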
+static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
+{
+    uint8_t from = (s->sew + 3) - div;
+    bool ret = require_rvv(s) &&
+               (from >= 3 && from <= 8) &&
+               (a->rd != a->rs2) &&
+               require_align(a->rd, s->lmul) &&
+               require_align(a->rs2, s->lmul - div) &&
+               require_vm(a->vm, a->rd) &&
+               require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
+    return ret;
+}
+
+static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
+{
+    uint32_t data = 0;
+    gen_helper_gvec_3_ptr *fn;
+    TCGLabel *over;
+
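+    /*
+     * Helper table indexed as fns[seq][sew]: rows 0-2 are
+     * vzext.vf2/vf4/vf8, rows 3-5 are vsext.vf2/vf4/vf8; NULL marks
+     * SEW/fraction combinations that int_ext_check() rejects.
+     */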
+    static gen_helper_gvec_3_ptr * const fns[6][4] = {
+        {
+            NULL, gen_helper_vzext_vf2_h,
+            gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
+        },
+        {
+            NULL, NULL,
+            gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d
+        },
+        {
+            NULL, NULL,
+            NULL, gen_helper_vzext_vf8_d
+        },
+        {
+            NULL, gen_helper_vsext_vf2_h,
+            gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
+        },
+        {
+            NULL, NULL,
+            gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d
+        },
+        {
+            NULL, NULL,
+            NULL, gen_helper_vsext_vf8_d
+        }
+    };
+
+    fn = fns[seq][s->sew];
+    if (fn == NULL) {
+        return false;
+    }
+
+    /*
+     * Emit the vl == 0 early exit only after the helper lookup has
+     * succeeded, so the branch label is always resolved.
+     */
+    over = gen_new_label();
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+    data = FIELD_DP32(data, VDATA, VM, a->vm);
+
+    tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+                       vreg_ofs(s, a->rs2), cpu_env, 0,
+                       s->vlen / 8, data, fn);
+
+    mark_vs_dirty(s);
+    gen_set_label(over);
+    return true;
+}
+
+/* Vector Integer Extension */
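+/* DIV is log2 of the fraction, SEQ the row in int_ext_op's fns[][]. */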
+#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ)              \
+static bool trans_##NAME(DisasContext *s, arg_rmr *a)  \
+{                                                      \
+    if (int_ext_check(s, a, DIV)) {                    \
+        return int_ext_op(s, a, SEQ);                  \
+    }                                                  \
+    return false;                                      \
+}
+
+GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
+GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
+GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
+GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
+GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
+GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)
@@ -4748,3 +4748,36 @@ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1)
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
+
+/* Vector Integer Extension */
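+/* HD/HS1 map element indices to host-endian storage order. */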
+#define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1)            \
+void HELPER(NAME)(void *vd, void *v0, void *vs2,                 \
+                  CPURISCVState *env, uint32_t desc)             \
+{                                                                \
+    uint32_t vl = env->vl;                                       \
+    uint32_t vm = vext_vm(desc);                                 \
+    uint32_t i;                                                  \
+                                                                 \
+    for (i = 0; i < vl; i++) {                                   \
+        if (!vm && !vext_elem_mask(v0, i)) {                     \
+            continue;                                            \
+        }                                                        \
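+        /* the C assignment extends DTYPE to ETYPE (zero/sign) */\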
+        *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i));       \
+    }                                                            \
+}
+
+GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t,  H2, H1)
+GEN_VEXT_INT_EXT(vzext_vf2_w, uint32_t, uint16_t, H4, H2)
+GEN_VEXT_INT_EXT(vzext_vf2_d, uint64_t, uint32_t, H8, H4)
+GEN_VEXT_INT_EXT(vzext_vf4_w, uint32_t, uint8_t,  H4, H1)
+GEN_VEXT_INT_EXT(vzext_vf4_d, uint64_t, uint16_t, H8, H2)
+GEN_VEXT_INT_EXT(vzext_vf8_d, uint64_t, uint8_t,  H8, H1)
+
+GEN_VEXT_INT_EXT(vsext_vf2_h, int16_t,  int8_t,  H2, H1)
+GEN_VEXT_INT_EXT(vsext_vf2_w, int32_t,  int16_t, H4, H2)
+GEN_VEXT_INT_EXT(vsext_vf2_d, int64_t,  int32_t, H8, H4)
+GEN_VEXT_INT_EXT(vsext_vf4_w, int32_t,  int8_t,  H4, H1)
+GEN_VEXT_INT_EXT(vsext_vf4_d, int64_t,  int16_t, H8, H2)
+GEN_VEXT_INT_EXT(vsext_vf8_d, int64_t,  int8_t,  H8, H1)