@@ -583,8 +583,8 @@ vid_v           010100 . 00000 10001 010 ..... 1010111 @r1_vm
 vmv_x_s         010000 1 ..... 00000 010 ..... 1010111 @r2rd
 vmv_s_x         010000 1 00000 ..... 110 ..... 1010111 @r2
 vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
-vfmv_f_s        001100 1 ..... 00000 001 ..... 1010111 @r2rd
-vfmv_s_f        001101 1 00000 ..... 101 ..... 1010111 @r2
+vfmv_f_s        010000 1 ..... 00000 001 ..... 1010111 @r2rd
+vfmv_s_f        010000 1 00000 ..... 101 ..... 1010111 @r2
 vslideup_vx     001110 . ..... ..... 100 ..... 1010111 @r_vm
 vslideup_vi     001110 . ..... ..... 011 ..... 1010111 @r_vm
 vslide1up_vx    001110 . ..... ..... 110 ..... 1010111 @r_vm
@@ -3038,50 +3038,50 @@ done:
 /* Floating-Point Scalar Move Instructions */
 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
 {
-    if (!s->vill && has_ext(s, RVF) &&
-        (s->mstatus_fs != 0) && (s->sew != 0)) {
-        unsigned int len = 8 << s->sew;
-
-        vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0);
-        if (len < 64) {
-            tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
-                            MAKE_64BIT_MASK(len, 64 - len));
-        }
-
-        mark_fs_dirty(s);
-        return true;
-    }
-    return false;
+    REQUIRE_RVV;
+    VEXT_CHECK_ISA_ILL(s);
+    require(has_ext(s, RVF));
+    require(s->mstatus_fs != 0);
+    require(s->sew != 0);
+
+    unsigned int len = 8 << s->sew;
+
+    vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
+    if (len < 64) {
+        tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
+                        MAKE_64BIT_MASK(len, 64 - len));
+    }
+
+    mark_fs_dirty(s);
+    return true;
 }
 
 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
 {
-    if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) {
-        TCGv_i64 t1;
-        /* The instructions ignore LMUL and vector register group. */
-        uint32_t vlmax = s->vlen >> 3;
-
-        /* if vl == 0, skip vector register write back */
-        TCGLabel *over = gen_new_label();
-        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
-
-        /* zeroed all elements */
-        tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0);
-
-        /* NaN-box f[rs1] as necessary for SEW */
-        t1 = tcg_temp_new_i64();
-        if (s->sew == MO_64 && !has_ext(s, RVD)) {
-            tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
-        } else {
-            tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
-        }
-        vec_element_storei(s, a->rd, 0, t1);
-        tcg_temp_free_i64(t1);
-        gen_set_label(over);
-        return true;
-    }
-    return false;
+    REQUIRE_RVV;
+    VEXT_CHECK_ISA_ILL(s);
+    require(has_ext(s, RVF));
+    require(s->sew != 0);
+
+    /* The instructions ignore LMUL and vector register group. */
+    TCGv_i64 t1;
+    TCGLabel *over = gen_new_label();
+
+    /* if vl == 0, skip vector register write back */
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+    /* NaN-box f[rs1] as necessary for SEW */
+    t1 = tcg_temp_new_i64();
+    if (s->sew == MO_64 && !has_ext(s, RVD)) {
+        tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
+    } else {
+        tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
+    }
+    vec_element_storei(s, a->rd, 0, t1);
+    tcg_temp_free_i64(t1);
+    gen_set_label(over);
+    return true;
 }
 
 /* Vector Slide Instructions */
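
For readers unfamiliar with the NaN-boxing seen in both translation functions
above: when the element is narrower than the 64-bit FP register (SEW of 16 or
32 bits here, since sew != 0 is required), every bit above the element width
is forced to one, which is exactly what the MAKE_64BIT_MASK(len, 64 - len)
term computes. The standalone sketch below only illustrates that mask
arithmetic; it is not QEMU code, and make_64bit_mask() and nanbox() are
hypothetical stand-ins for QEMU's MAKE_64BIT_MASK macro and the open-coded
boxing in the patch.

/*
 * Standalone sketch (not QEMU code) of the NaN-boxing mask arithmetic.
 * make_64bit_mask() mirrors what QEMU's MAKE_64BIT_MASK(shift, length)
 * macro produces.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* A mask of 'length' one-bits starting at bit position 'shift'. */
static uint64_t make_64bit_mask(unsigned shift, unsigned length)
{
    uint64_t ones = (length >= 64) ? ~UINT64_C(0)
                                   : (UINT64_C(1) << length) - 1;
    return ones << shift;
}

/* Box a value of 'len' bits (len = 8 << sew) into a 64-bit FP register. */
static uint64_t nanbox(uint64_t value, unsigned len)
{
    if (len < 64) {
        value |= make_64bit_mask(len, 64 - len);
    }
    return value;
}

int main(void)
{
    /* SEW=16: upper 48 bits forced to one -> ffffffffffff3c00 */
    printf("%016" PRIx64 "\n", nanbox(0x3c00, 16));
    /* SEW=32: upper 32 bits forced to one -> ffffffff3f800000 */
    printf("%016" PRIx64 "\n", nanbox(0x3f800000, 32));
    /* SEW=64: value already fills the register, left untouched */
    printf("%016" PRIx64 "\n", nanbox(UINT64_C(0x3ff0000000000000), 64));
    return 0;
}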