@@ -793,3 +793,19 @@ DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vfadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
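+/*
+ * Note: the _vf variants below take the scalar operand by value as a raw
+ * 64-bit quantity (the contents of the f register) rather than as a
+ * pointer to a vector register.
+ */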
+DEF_HELPER_6(vfadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfadd_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
@@ -443,6 +443,11 @@ vnclipu_vi 101110 . ..... ..... 011 ..... 1010111 @r_vm
vnclip_vv 101111 . ..... ..... 000 ..... 1010111 @r_vm
vnclip_vx 101111 . ..... ..... 100 ..... 1010111 @r_vm
vnclip_vi 101111 . ..... ..... 011 ..... 1010111 @r_vm
+vfadd_vv 000000 . ..... ..... 001 ..... 1010111 @r_vm
+vfadd_vf 000000 . ..... ..... 101 ..... 1010111 @r_vm
+vfsub_vv 000010 . ..... ..... 001 ..... 1010111 @r_vm
+vfsub_vf 000010 . ..... ..... 101 ..... 1010111 @r_vm
+vfrsub_vf 100111 . ..... ..... 101 ..... 1010111 @r_vm

vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
@@ -1672,3 +1672,110 @@ GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
GEN_OPIVX_NARROW_TRANS(vnclip_vx)
GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
+
+/*
+ *** Vector Floating-Point Arithmetic Instructions
+ */
+/* Vector Single-Width Floating-Point Add/Subtract Instructions */
+
+/*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised.
+ */
+static bool opfvv_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ (s->sew != 0));
+}
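+/*
+ * s->sew holds the vsew encoding (0 = 8-bit, 1 = 16-bit, 2 = 32-bit,
+ * 3 = 64-bit), so the sew != 0 check above rejects 8-bit elements and the
+ * helper tables below can be indexed with fns[s->sew - 1] to select the
+ * half/single/double variant.
+ */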
+
+/* OPFVV without GVEC IR */
+#define GEN_OPFVV_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_4_ptr * const fns[3] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ gen_helper_##NAME##_d, \
+ }; \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), \
+ cpu_env, 0, s->vlen / 8, data, fns[s->sew - 1]); \
+ return true; \
+ } \
+ return false; \
+}
+GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
+
+typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
+ TCGv_env, TCGv_i32);
+
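+/*
+ * opfvf_trans builds env-relative pointers to vd, vs2 and the mask register
+ * v0, then invokes the helper with the scalar operand passed by value
+ * straight from cpu_fpr[rs1]; the helper decides how much of that 64-bit
+ * value to use for the current SEW.
+ */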
+static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+ uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
+{
+ TCGv_ptr dest, src2, mask;
+ TCGv_i32 desc;
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ src2 = tcg_temp_new_ptr();
+ desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(src2);
+ tcg_temp_free_i32(desc);
+ return true;
+}
+
+/*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised.
+ */
+static bool opfvf_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (s->sew != 0));
+}
+
+/* OPFVF without GVEC IR */
+#define GEN_OPFVF_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_opfvf *const fns[3] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ gen_helper_##NAME##_d, \
+ }; \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
+ fns[s->sew - 1], s); \
+ } \
+ return false; \
+}
+
+GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
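+/*
+ * vfrsub is defined only in vector-scalar form (the ISA has no vfrsub.vv),
+ * so it only needs an OPFVF entry.
+ */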
+GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
@@ -21,6 +21,7 @@
#include "exec/memop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
#include <math.h>
@@ -3151,3 +3152,117 @@ RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl)
+
+/*
+ *** Vector Floating-Point Arithmetic Instructions
+ */
+/* Vector Single-Width Floating-Point Add/Subtract Instructions */
+#define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \
+}
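+/*
+ * RVVCALL() and the OP_UUU_* type lists are reused from the integer helpers
+ * earlier in this file.  Assuming OP_UUU_H supplies uint16_t for every
+ * operand type, RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
+ * below expands to roughly:
+ *
+ *   static void do_vfadd_vv_h(void *vd, void *vs1, void *vs2, int i,
+ *                             CPURISCVState *env)
+ *   {
+ *       uint16_t s1 = *((uint16_t *)vs1 + H2(i));
+ *       uint16_t s2 = *((uint16_t *)vs2 + H2(i));
+ *       *((uint16_t *)vd + H2(i)) = float16_add(s2, s1, &env->fp_status);
+ *   }
+ */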
+
+#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_maxsz(desc) / ESZ; \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ if (vl == 0) { \
+ return; \
+ } \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ do_##NAME(vd, vs1, vs2, i, env); \
+ } \
+ CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
+}
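+/*
+ * Masked-off elements (vm == 0 with the corresponding mask bit clear) are
+ * left untouched, and the CLEAR_FN hook shared with the integer helpers
+ * takes care of the tail elements from vl up to vlmax.
+ */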
+
+RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
+RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add)
+RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add)
+GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq)
+
+#define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
+}
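+/*
+ * The (TX1)(T1)s1 cast truncates the 64-bit scalar to the element type, so
+ * the 16- and 32-bit variants use only the low bits of the f register value.
+ */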
+
+#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_maxsz(desc) / ESZ; \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ if (vl == 0) { \
+ return; \
+ } \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ do_##NAME(vd, s1, vs2, i, env); \
+ } \
+ CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
+}
+
+RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
+RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add)
+RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add)
+GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq)
+
+RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub)
+RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub)
+RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub)
+GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub)
+RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub)
+RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub)
+GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq)
+
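+/*
+ * OPFVF2 always invokes OP(vs2[i], scalar), so these wrappers swap the
+ * operand order to give vfrsub its f[rs1] - vs2[i] semantics rather than
+ * vs2[i] - f[rs1].
+ */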
+static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s)
+{
+ return float16_sub(b, a, s);
+}
+
+static uint32_t float32_rsub(uint32_t a, uint32_t b, float_status *s)
+{
+ return float32_sub(b, a, s);
+}
+
+static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s)
+{
+ return float64_sub(b, a, s);
+}
+
+RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub)
+RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub)
+RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
+GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq)