--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -949,3 +949,40 @@ DEF_HELPER_6(vfsgnjn_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfsgnjx_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfsgnjx_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfsgnjx_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vmfeq_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfne_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfne_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfne_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmflt_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmflt_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmflt_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfle_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfle_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfle_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfne_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfne_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfne_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmflt_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmflt_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmflt_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfle_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfle_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfle_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfgt_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfgt_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfgt_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfge_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfge_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfge_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmford_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmford_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmford_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmford_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmford_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmford_vf_d, void, ptr, ptr, i64, ptr, env, i32)
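For readers unfamiliar with QEMU's helper machinery: each DEF_HELPER_6 line
both declares the C prototype and records the signature TCG uses to emit the
call. A rough sketch of what one entry above expands to, assuming QEMU's usual
helper typedefs ("env" maps to CPURISCVState * and "i32" to uint32_t); this is
an approximation, not the literal preprocessor output:

    /* Approximate declaration generated for the first new entry. */
    void helper_vmfeq_vv_h(void *vd, void *v0, void *vs1, void *vs2,
                           CPURISCVState *env, uint32_t desc);

The _vf variants replace the vs1 pointer with the i64 scalar operand.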
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -500,6 +500,18 @@ vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm
vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm
+vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm
+vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm
+vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm
+vmfne_vf 011100 . ..... ..... 101 ..... 1010111 @r_vm
+vmflt_vv 011011 . ..... ..... 001 ..... 1010111 @r_vm
+vmflt_vf 011011 . ..... ..... 101 ..... 1010111 @r_vm
+vmfle_vv 011001 . ..... ..... 001 ..... 1010111 @r_vm
+vmfle_vf 011001 . ..... ..... 101 ..... 1010111 @r_vm
+vmfgt_vf 011101 . ..... ..... 101 ..... 1010111 @r_vm
+vmfge_vf 011111 . ..... ..... 101 ..... 1010111 @r_vm
+vmford_vv 011010 . ..... ..... 001 ..... 1010111 @r_vm
+vmford_vf 011010 . ..... ..... 101 ..... 1010111 @r_vm

vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
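Each pattern fixes funct6 in bits 31:26, funct3 in bits 14:12 (001 for OPFVV,
101 for OPFVF) and the OP-V major opcode 1010111, while the dotted fields
capture vm, vs2, vs1/rs1 and vd. A worked packing example; the rvv_encode
helper is illustrative only, not part of the patch:

    #include <stdint.h>

    /* Pack an OP-V instruction word from its fields. */
    static uint32_t rvv_encode(uint32_t funct6, uint32_t vm, uint32_t vs2,
                               uint32_t vs1, uint32_t funct3, uint32_t vd)
    {
        return (funct6 << 26) | (vm << 25) | (vs2 << 20) |
               (vs1 << 15) | (funct3 << 12) | (vd << 7) | 0x57; /* OP-V */
    }

    /* "vmfeq.vv v2, v4, v6" (unmasked, vm=1; assembler order vd, vs2, vs1):
     * rvv_encode(0x18, 1, 4, 6, 1, 2) == 0x62431157 */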
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -2000,3 +2000,36 @@ GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
+
+/* Vector Floating-Point Compare Instructions */
+static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ (s->sew != 0) &&
+ ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+ vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
+ (s->lmul == 0)));
+}
+GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
+
+static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (s->sew != 0) &&
+ (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
+ (s->lmul == 0)));
+}
+GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
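The compare translators differ from the plain opfvv_check/opfvf_check because
the result is a mask: the destination occupies a single register regardless of
LMUL, so it may only overlap a source register group when lmul == 0, i.e. when
the group size is one. A minimal model of the constraint being enforced; these
are the assumed semantics of vext_check_overlap_group, not the QEMU source:

    /* Groups [rd, rd+dlen) and [rs, rs+slen) must be disjoint. */
    static bool no_overlap(int rd, int dlen, int rs, int slen)
    {
        return rd + dlen <= rs || rs + slen <= rd;
    }
    /* lmul = 2, source group of 4: rd = 1, rs2 = 4 passes;
     * rd = 5, rs2 = 4 fails. */

Note also that vmfgt and vmfge exist only in _vf form: the specification
obtains the vector-vector variants by swapping the operands of vmflt.vv and
vmfle.vv instead.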
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -3871,3 +3871,185 @@ RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64)
GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)
+
+/* Vector Floating-Point Compare Instructions */
+#define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ if (vl == 0) { \
+ return; \
+ } \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, \
+ &env->fp_status)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+static uint8_t float16_eq_quiet(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare_quiet(a, b, s);
+ return compare == float_relation_equal;
+}
+
+GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
+GEN_VEXT_CMP_VV_ENV(vmfeq_vv_w, uint32_t, H4, float32_eq_quiet)
+GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet)
+
+#define GEN_VEXT_CMP_VF(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ if (vl == 0) { \
+ return; \
+ } \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, mlen, i, DO_OP(s2, (ETYPE)s1, \
+ &env->fp_status)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
+GEN_VEXT_CMP_VF(vmfeq_vf_w, uint32_t, H4, float32_eq_quiet)
+GEN_VEXT_CMP_VF(vmfeq_vf_d, uint64_t, H8, float64_eq_quiet)
+
+static uint8_t vmfne16(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare_quiet(a, b, s);
+ return compare != float_relation_equal &&
+ compare != float_relation_unordered;
+}
+
+static uint8_t vmfne32(uint32_t a, uint32_t b, float_status *s)
+{
+ int compare = float32_compare_quiet(a, b, s);
+ return compare != float_relation_equal &&
+ compare != float_relation_unordered;
+}
+
+static uint8_t vmfne64(uint64_t a, uint64_t b, float_status *s)
+{
+ int compare = float64_compare_quiet(a, b, s);
+ return compare != float_relation_equal &&
+ compare != float_relation_unordered;
+}
+
+GEN_VEXT_CMP_VV_ENV(vmfne_vv_h, uint16_t, H2, vmfne16)
+GEN_VEXT_CMP_VV_ENV(vmfne_vv_w, uint32_t, H4, vmfne32)
+GEN_VEXT_CMP_VV_ENV(vmfne_vv_d, uint64_t, H8, vmfne64)
+GEN_VEXT_CMP_VF(vmfne_vf_h, uint16_t, H2, vmfne16)
+GEN_VEXT_CMP_VF(vmfne_vf_w, uint32_t, H4, vmfne32)
+GEN_VEXT_CMP_VF(vmfne_vf_d, uint64_t, H8, vmfne64)
+
+static uint8_t float16_lt(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare(a, b, s);
+ return compare == float_relation_less;
+}
+
+GEN_VEXT_CMP_VV_ENV(vmflt_vv_h, uint16_t, H2, float16_lt)
+GEN_VEXT_CMP_VV_ENV(vmflt_vv_w, uint32_t, H4, float32_lt)
+GEN_VEXT_CMP_VV_ENV(vmflt_vv_d, uint64_t, H8, float64_lt)
+GEN_VEXT_CMP_VF(vmflt_vf_h, uint16_t, H2, float16_lt)
+GEN_VEXT_CMP_VF(vmflt_vf_w, uint32_t, H4, float32_lt)
+GEN_VEXT_CMP_VF(vmflt_vf_d, uint64_t, H8, float64_lt)
+
+static uint8_t float16_le(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare(a, b, s);
+ return compare == float_relation_less ||
+ compare == float_relation_equal;
+}
+
+GEN_VEXT_CMP_VV_ENV(vmfle_vv_h, uint16_t, H2, float16_le)
+GEN_VEXT_CMP_VV_ENV(vmfle_vv_w, uint32_t, H4, float32_le)
+GEN_VEXT_CMP_VV_ENV(vmfle_vv_d, uint64_t, H8, float64_le)
+GEN_VEXT_CMP_VF(vmfle_vf_h, uint16_t, H2, float16_le)
+GEN_VEXT_CMP_VF(vmfle_vf_w, uint32_t, H4, float32_le)
+GEN_VEXT_CMP_VF(vmfle_vf_d, uint64_t, H8, float64_le)
+
+static uint8_t vmfgt16(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare(a, b, s);
+ return compare == float_relation_greater;
+}
+
+static uint8_t vmfgt32(uint32_t a, uint32_t b, float_status *s)
+{
+ int compare = float32_compare(a, b, s);
+ return compare == float_relation_greater;
+}
+
+static uint8_t vmfgt64(uint64_t a, uint64_t b, float_status *s)
+{
+ int compare = float64_compare(a, b, s);
+ return compare == float_relation_greater;
+}
+
+GEN_VEXT_CMP_VF(vmfgt_vf_h, uint16_t, H2, vmfgt16)
+GEN_VEXT_CMP_VF(vmfgt_vf_w, uint32_t, H4, vmfgt32)
+GEN_VEXT_CMP_VF(vmfgt_vf_d, uint64_t, H8, vmfgt64)
+
+static uint8_t vmfge16(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare(a, b, s);
+ return compare == float_relation_greater ||
+ compare == float_relation_equal;
+}
+
+static uint8_t vmfge32(uint32_t a, uint32_t b, float_status *s)
+{
+ int compare = float32_compare(a, b, s);
+ return compare == float_relation_greater ||
+ compare == float_relation_equal;
+}
+
+static uint8_t vmfge64(uint64_t a, uint64_t b, float_status *s)
+{
+ int compare = float64_compare(a, b, s);
+ return compare == float_relation_greater ||
+ compare == float_relation_equal;
+}
+
+GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16)
+GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
+GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
+
+static uint8_t float16_unordered_quiet(uint16_t a, uint16_t b, float_status *s)
+{
+ int compare = float16_compare_quiet(a, b, s);
+ return compare == float_relation_unordered;
+}
+
+GEN_VEXT_CMP_VV_ENV(vmford_vv_h, uint16_t, H2, !float16_unordered_quiet)
+GEN_VEXT_CMP_VV_ENV(vmford_vv_w, uint32_t, H4, !float32_unordered_quiet)
+GEN_VEXT_CMP_VV_ENV(vmford_vv_d, uint64_t, H8, !float64_unordered_quiet)
+GEN_VEXT_CMP_VF(vmford_vf_h, uint16_t, H2, !float16_unordered_quiet)
+GEN_VEXT_CMP_VF(vmford_vf_w, uint32_t, H4, !float32_unordered_quiet)
+GEN_VEXT_CMP_VF(vmford_vf_d, uint64_t, H8, !float64_unordered_quiet)
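Two points worth noting about the helpers above. First, passing
!float16_unordered_quiet as DO_OP is deliberate, not a typo: inside the macro
body the operator lands in front of the call, so the vmford case expands to

    vext_set_elem_mask(vd, mlen, i,
                       !float16_unordered_quiet(s2, s1, &env->fp_status));

which sets the mask bit when the operands are ordered, i.e. neither is NaN.
Second, vmfeq/vmfne/vmford use the quiet compare variants, which raise the
invalid flag only for signalling NaNs, while vmflt/vmfle/vmfgt/vmfge use the
signalling compares (float16_compare and friends), which raise it for any NaN
operand; this matches the IEEE 754 comparison-predicate distinction.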
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
 target/riscv/helper.h                   |  37 +++++
 target/riscv/insn32.decode              |  12 ++
 target/riscv/insn_trans/trans_rvv.inc.c |  33 +++++
 target/riscv/vector_helper.c            | 182 ++++++++++++++++++++++++
 4 files changed, 264 insertions(+)
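
A toy model of the mask-write policy the two GEN_VEXT_CMP macros implement,
simplified to one bit per element; the function name and layout are
illustrative only. When vl == 0 the real helpers return without touching the
destination at all:

    #include <stdbool.h>
    #include <stdint.h>

    static void cmp_mask_model(uint8_t *mask, const bool *active,
                               const bool *result, uint32_t vl,
                               uint32_t vlmax)
    {
        for (uint32_t i = 0; i < vl; i++) {
            if (active[i]) {
                mask[i] = result[i]; /* active body element: compare result */
            }                        /* inactive body element: unchanged */
        }
        for (uint32_t i = vl; i < vlmax; i++) {
            mask[i] = 0;             /* tail elements: cleared */
        }
    }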