@@ -2342,6 +2342,49 @@ INSN_LASX(xvffintl_d_w, vv)
INSN_LASX(xvffinth_d_w, vv)
INSN_LASX(xvffint_s_l, vvv)
+INSN_LASX(xvseq_b, vvv)
+INSN_LASX(xvseq_h, vvv)
+INSN_LASX(xvseq_w, vvv)
+INSN_LASX(xvseq_d, vvv)
+INSN_LASX(xvseqi_b, vv_i)
+INSN_LASX(xvseqi_h, vv_i)
+INSN_LASX(xvseqi_w, vv_i)
+INSN_LASX(xvseqi_d, vv_i)
+
+INSN_LASX(xvsle_b, vvv)
+INSN_LASX(xvsle_h, vvv)
+INSN_LASX(xvsle_w, vvv)
+INSN_LASX(xvsle_d, vvv)
+INSN_LASX(xvslei_b, vv_i)
+INSN_LASX(xvslei_h, vv_i)
+INSN_LASX(xvslei_w, vv_i)
+INSN_LASX(xvslei_d, vv_i)
+INSN_LASX(xvsle_bu, vvv)
+INSN_LASX(xvsle_hu, vvv)
+INSN_LASX(xvsle_wu, vvv)
+INSN_LASX(xvsle_du, vvv)
+INSN_LASX(xvslei_bu, vv_i)
+INSN_LASX(xvslei_hu, vv_i)
+INSN_LASX(xvslei_wu, vv_i)
+INSN_LASX(xvslei_du, vv_i)
+
+INSN_LASX(xvslt_b, vvv)
+INSN_LASX(xvslt_h, vvv)
+INSN_LASX(xvslt_w, vvv)
+INSN_LASX(xvslt_d, vvv)
+INSN_LASX(xvslti_b, vv_i)
+INSN_LASX(xvslti_h, vv_i)
+INSN_LASX(xvslti_w, vv_i)
+INSN_LASX(xvslti_d, vv_i)
+INSN_LASX(xvslt_bu, vvv)
+INSN_LASX(xvslt_hu, vvv)
+INSN_LASX(xvslt_wu, vvv)
+INSN_LASX(xvslt_du, vvv)
+INSN_LASX(xvslti_bu, vv_i)
+INSN_LASX(xvslti_hu, vv_i)
+INSN_LASX(xvslti_wu, vv_i)
+INSN_LASX(xvslti_du, vv_i)
+
INSN_LASX(xvreplgr2vr_b, vr)
INSN_LASX(xvreplgr2vr_h, vr)
INSN_LASX(xvreplgr2vr_w, vr)
@@ -715,6 +715,49 @@ TRANS(xvffintl_d_w, gen_vv, 32, gen_helper_vffintl_d_w)
TRANS(xvffinth_d_w, gen_vv, 32, gen_helper_vffinth_d_w)
TRANS(xvffint_s_l, gen_vvv, 32, gen_helper_vffint_s_l)
+TRANS(xvseq_b, do_cmp, 32, MO_8, TCG_COND_EQ)
+TRANS(xvseq_h, do_cmp, 32, MO_16, TCG_COND_EQ)
+TRANS(xvseq_w, do_cmp, 32, MO_32, TCG_COND_EQ)
+TRANS(xvseq_d, do_cmp, 32, MO_64, TCG_COND_EQ)
+TRANS(xvseqi_b, do_vseqi_s, 32, MO_8)
+TRANS(xvseqi_h, do_vseqi_s, 32, MO_16)
+TRANS(xvseqi_w, do_vseqi_s, 32, MO_32)
+TRANS(xvseqi_d, do_vseqi_s, 32, MO_64)
+
+TRANS(xvsle_b, do_cmp, 32, MO_8, TCG_COND_LE)
+TRANS(xvsle_h, do_cmp, 32, MO_16, TCG_COND_LE)
+TRANS(xvsle_w, do_cmp, 32, MO_32, TCG_COND_LE)
+TRANS(xvsle_d, do_cmp, 32, MO_64, TCG_COND_LE)
+TRANS(xvslei_b, do_vslei_s, 32, MO_8)
+TRANS(xvslei_h, do_vslei_s, 32, MO_16)
+TRANS(xvslei_w, do_vslei_s, 32, MO_32)
+TRANS(xvslei_d, do_vslei_s, 32, MO_64)
+TRANS(xvsle_bu, do_cmp, 32, MO_8, TCG_COND_LEU)
+TRANS(xvsle_hu, do_cmp, 32, MO_16, TCG_COND_LEU)
+TRANS(xvsle_wu, do_cmp, 32, MO_32, TCG_COND_LEU)
+TRANS(xvsle_du, do_cmp, 32, MO_64, TCG_COND_LEU)
+TRANS(xvslei_bu, do_vslei_u, 32, MO_8)
+TRANS(xvslei_hu, do_vslei_u, 32, MO_16)
+TRANS(xvslei_wu, do_vslei_u, 32, MO_32)
+TRANS(xvslei_du, do_vslei_u, 32, MO_64)
+
+TRANS(xvslt_b, do_cmp, 32, MO_8, TCG_COND_LT)
+TRANS(xvslt_h, do_cmp, 32, MO_16, TCG_COND_LT)
+TRANS(xvslt_w, do_cmp, 32, MO_32, TCG_COND_LT)
+TRANS(xvslt_d, do_cmp, 32, MO_64, TCG_COND_LT)
+TRANS(xvslti_b, do_vslti_s, 32, MO_8)
+TRANS(xvslti_h, do_vslti_s, 32, MO_16)
+TRANS(xvslti_w, do_vslti_s, 32, MO_32)
+TRANS(xvslti_d, do_vslti_s, 32, MO_64)
+TRANS(xvslt_bu, do_cmp, 32, MO_8, TCG_COND_LTU)
+TRANS(xvslt_hu, do_cmp, 32, MO_16, TCG_COND_LTU)
+TRANS(xvslt_wu, do_cmp, 32, MO_32, TCG_COND_LTU)
+TRANS(xvslt_du, do_cmp, 32, MO_64, TCG_COND_LTU)
+TRANS(xvslti_bu, do_vslti_u, 32, MO_8)
+TRANS(xvslti_hu, do_vslti_u, 32, MO_16)
+TRANS(xvslti_wu, do_vslti_u, 32, MO_32)
+TRANS(xvslti_du, do_vslti_u, 32, MO_64)
+
TRANS(xvreplgr2vr_b, gvec_dup, 32, MO_8)
TRANS(xvreplgr2vr_h, gvec_dup, 32, MO_16)
TRANS(xvreplgr2vr_w, gvec_dup, 32, MO_32)
@@ -3686,7 +3686,8 @@ TRANS(vffintl_d_w, gen_vv, 16, gen_helper_vffintl_d_w)
TRANS(vffinth_d_w, gen_vv, 16, gen_helper_vffinth_d_w)
TRANS(vffint_s_l, gen_vvv, 16, gen_helper_vffint_s_l)
-static bool do_cmp(DisasContext *ctx, arg_vvv *a, MemOp mop, TCGCond cond)
+static bool do_cmp(DisasContext *ctx, arg_vvv *a,
+ uint32_t oprsz, MemOp mop, TCGCond cond)
{
uint32_t vd_ofs, vj_ofs, vk_ofs;
@@ -3696,7 +3697,7 @@ static bool do_cmp(DisasContext *ctx, arg_vvv *a, MemOp mop, TCGCond cond)
vj_ofs = vec_full_offset(a->vj);
vk_ofs = vec_full_offset(a->vk);
- tcg_gen_gvec_cmp(cond, mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8);
+ tcg_gen_gvec_cmp(cond, mop, vd_ofs, vj_ofs, vk_ofs, oprsz, ctx->vl / 8);
return true;
}
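
A note on the widened do_cmp signature: the new oprsz argument lets the
128-bit LSX and 256-bit LASX translators share this one function. Each
TRANS() line simply forwards its trailing arguments into the named
function; a minimal sketch of the expansion (assuming the usual TRANS
wrapper these .c.inc files are built around, shown for illustration
only):

    /* roughly what TRANS(xvseq_b, do_cmp, 32, MO_8, TCG_COND_EQ) expands to */
    static bool trans_xvseq_b(DisasContext *ctx, arg_xvseq_b *a)
    {
        return do_cmp(ctx, a, 32, MO_8, TCG_COND_EQ);   /* oprsz = 32 bytes */
    }

tcg_gen_gvec_cmp() then computes lane masks over oprsz bytes and clears
anything between oprsz and maxsz (ctx->vl / 8).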
@@ -3731,145 +3732,147 @@ static void gen_vslti_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
do_cmpi_vec(TCG_COND_LTU, vece, t, a, imm);
}
-#define DO_CMPI_S(NAME) \
-static bool do_## NAME ##_s(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
-{ \
- uint32_t vd_ofs, vj_ofs; \
- \
- CHECK_VEC; \
- \
- static const TCGOpcode vecop_list[] = { \
- INDEX_op_cmp_vec, 0 \
- }; \
- static const GVecGen2i op[4] = { \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_b, \
- .opt_opc = vecop_list, \
- .vece = MO_8 \
- }, \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_h, \
- .opt_opc = vecop_list, \
- .vece = MO_16 \
- }, \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_w, \
- .opt_opc = vecop_list, \
- .vece = MO_32 \
- }, \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_d, \
- .opt_opc = vecop_list, \
- .vece = MO_64 \
- } \
- }; \
- \
- vd_ofs = vec_full_offset(a->vd); \
- vj_ofs = vec_full_offset(a->vj); \
- \
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
- \
- return true; \
+#define DO_CMPI_S(NAME) \
+static bool do_## NAME ##_s(DisasContext *ctx, \
+ arg_vv_i *a, uint32_t oprsz, MemOp mop) \
+{ \
+ uint32_t vd_ofs, vj_ofs; \
+ \
+ CHECK_VEC; \
+ \
+ static const TCGOpcode vecop_list[] = { \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen2i op[4] = { \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_b, \
+ .opt_opc = vecop_list, \
+ .vece = MO_8 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_h, \
+ .opt_opc = vecop_list, \
+ .vece = MO_16 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_w, \
+ .opt_opc = vecop_list, \
+ .vece = MO_32 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_d, \
+ .opt_opc = vecop_list, \
+ .vece = MO_64 \
+ } \
+ }; \
+ \
+ vd_ofs = vec_full_offset(a->vd); \
+ vj_ofs = vec_full_offset(a->vj); \
+ \
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, ctx->vl / 8, a->imm, &op[mop]); \
+ \
+ return true; \
}
DO_CMPI_S(vseqi)
DO_CMPI_S(vslei)
DO_CMPI_S(vslti)
-#define DO_CMPI_U(NAME) \
-static bool do_## NAME ##_u(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
-{ \
- uint32_t vd_ofs, vj_ofs; \
- \
- CHECK_VEC; \
- \
- static const TCGOpcode vecop_list[] = { \
- INDEX_op_cmp_vec, 0 \
- }; \
- static const GVecGen2i op[4] = { \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_bu, \
- .opt_opc = vecop_list, \
- .vece = MO_8 \
- }, \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_hu, \
- .opt_opc = vecop_list, \
- .vece = MO_16 \
- }, \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_wu, \
- .opt_opc = vecop_list, \
- .vece = MO_32 \
- }, \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_du, \
- .opt_opc = vecop_list, \
- .vece = MO_64 \
- } \
- }; \
- \
- vd_ofs = vec_full_offset(a->vd); \
- vj_ofs = vec_full_offset(a->vj); \
- \
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
- \
- return true; \
+#define DO_CMPI_U(NAME) \
+static bool do_## NAME ##_u(DisasContext *ctx, \
+ arg_vv_i *a, uint32_t oprsz, MemOp mop) \
+{ \
+ uint32_t vd_ofs, vj_ofs; \
+ \
+ CHECK_VEC; \
+ \
+ static const TCGOpcode vecop_list[] = { \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen2i op[4] = { \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_bu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_8 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_hu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_16 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_wu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_32 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_du, \
+ .opt_opc = vecop_list, \
+ .vece = MO_64 \
+ } \
+ }; \
+ \
+ vd_ofs = vec_full_offset(a->vd); \
+ vj_ofs = vec_full_offset(a->vj); \
+ \
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, ctx->vl / 8, a->imm, &op[mop]); \
+ \
+ return true; \
}
DO_CMPI_U(vslei)
DO_CMPI_U(vslti)
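
For reference, the op[] tables above are indexed directly by the element
MemOp (MO_8 through MO_64 are 0 through 3), and tcg_gen_gvec_2i()
prefers the inline .fniv expander when the TCG backend provides
INDEX_op_cmp_vec, falling back to the out-of-line .fnoi helper
otherwise. A hypothetical call, with the insn already decoded into a:

    /* xvslei.hu: unsigned 16-bit lanes over 32 bytes -> op[MO_16] */
    do_vslei_u(ctx, a, 32, MO_16);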
-TRANS(vseq_b, do_cmp, MO_8, TCG_COND_EQ)
-TRANS(vseq_h, do_cmp, MO_16, TCG_COND_EQ)
-TRANS(vseq_w, do_cmp, MO_32, TCG_COND_EQ)
-TRANS(vseq_d, do_cmp, MO_64, TCG_COND_EQ)
-TRANS(vseqi_b, do_vseqi_s, MO_8)
-TRANS(vseqi_h, do_vseqi_s, MO_16)
-TRANS(vseqi_w, do_vseqi_s, MO_32)
-TRANS(vseqi_d, do_vseqi_s, MO_64)
-
-TRANS(vsle_b, do_cmp, MO_8, TCG_COND_LE)
-TRANS(vsle_h, do_cmp, MO_16, TCG_COND_LE)
-TRANS(vsle_w, do_cmp, MO_32, TCG_COND_LE)
-TRANS(vsle_d, do_cmp, MO_64, TCG_COND_LE)
-TRANS(vslei_b, do_vslei_s, MO_8)
-TRANS(vslei_h, do_vslei_s, MO_16)
-TRANS(vslei_w, do_vslei_s, MO_32)
-TRANS(vslei_d, do_vslei_s, MO_64)
-TRANS(vsle_bu, do_cmp, MO_8, TCG_COND_LEU)
-TRANS(vsle_hu, do_cmp, MO_16, TCG_COND_LEU)
-TRANS(vsle_wu, do_cmp, MO_32, TCG_COND_LEU)
-TRANS(vsle_du, do_cmp, MO_64, TCG_COND_LEU)
-TRANS(vslei_bu, do_vslei_u, MO_8)
-TRANS(vslei_hu, do_vslei_u, MO_16)
-TRANS(vslei_wu, do_vslei_u, MO_32)
-TRANS(vslei_du, do_vslei_u, MO_64)
-
-TRANS(vslt_b, do_cmp, MO_8, TCG_COND_LT)
-TRANS(vslt_h, do_cmp, MO_16, TCG_COND_LT)
-TRANS(vslt_w, do_cmp, MO_32, TCG_COND_LT)
-TRANS(vslt_d, do_cmp, MO_64, TCG_COND_LT)
-TRANS(vslti_b, do_vslti_s, MO_8)
-TRANS(vslti_h, do_vslti_s, MO_16)
-TRANS(vslti_w, do_vslti_s, MO_32)
-TRANS(vslti_d, do_vslti_s, MO_64)
-TRANS(vslt_bu, do_cmp, MO_8, TCG_COND_LTU)
-TRANS(vslt_hu, do_cmp, MO_16, TCG_COND_LTU)
-TRANS(vslt_wu, do_cmp, MO_32, TCG_COND_LTU)
-TRANS(vslt_du, do_cmp, MO_64, TCG_COND_LTU)
-TRANS(vslti_bu, do_vslti_u, MO_8)
-TRANS(vslti_hu, do_vslti_u, MO_16)
-TRANS(vslti_wu, do_vslti_u, MO_32)
-TRANS(vslti_du, do_vslti_u, MO_64)
+TRANS(vseq_b, do_cmp, 16, MO_8, TCG_COND_EQ)
+TRANS(vseq_h, do_cmp, 16, MO_16, TCG_COND_EQ)
+TRANS(vseq_w, do_cmp, 16, MO_32, TCG_COND_EQ)
+TRANS(vseq_d, do_cmp, 16, MO_64, TCG_COND_EQ)
+TRANS(vseqi_b, do_vseqi_s, 16, MO_8)
+TRANS(vseqi_h, do_vseqi_s, 16, MO_16)
+TRANS(vseqi_w, do_vseqi_s, 16, MO_32)
+TRANS(vseqi_d, do_vseqi_s, 16, MO_64)
+
+TRANS(vsle_b, do_cmp, 16, MO_8, TCG_COND_LE)
+TRANS(vsle_h, do_cmp, 16, MO_16, TCG_COND_LE)
+TRANS(vsle_w, do_cmp, 16, MO_32, TCG_COND_LE)
+TRANS(vsle_d, do_cmp, 16, MO_64, TCG_COND_LE)
+TRANS(vslei_b, do_vslei_s, 16, MO_8)
+TRANS(vslei_h, do_vslei_s, 16, MO_16)
+TRANS(vslei_w, do_vslei_s, 16, MO_32)
+TRANS(vslei_d, do_vslei_s, 16, MO_64)
+TRANS(vsle_bu, do_cmp, 16, MO_8, TCG_COND_LEU)
+TRANS(vsle_hu, do_cmp, 16, MO_16, TCG_COND_LEU)
+TRANS(vsle_wu, do_cmp, 16, MO_32, TCG_COND_LEU)
+TRANS(vsle_du, do_cmp, 16, MO_64, TCG_COND_LEU)
+TRANS(vslei_bu, do_vslei_u, 16, MO_8)
+TRANS(vslei_hu, do_vslei_u, 16, MO_16)
+TRANS(vslei_wu, do_vslei_u, 16, MO_32)
+TRANS(vslei_du, do_vslei_u, 16, MO_64)
+
+TRANS(vslt_b, do_cmp, 16, MO_8, TCG_COND_LT)
+TRANS(vslt_h, do_cmp, 16, MO_16, TCG_COND_LT)
+TRANS(vslt_w, do_cmp, 16, MO_32, TCG_COND_LT)
+TRANS(vslt_d, do_cmp, 16, MO_64, TCG_COND_LT)
+TRANS(vslti_b, do_vslti_s, 16, MO_8)
+TRANS(vslti_h, do_vslti_s, 16, MO_16)
+TRANS(vslti_w, do_vslti_s, 16, MO_32)
+TRANS(vslti_d, do_vslti_s, 16, MO_64)
+TRANS(vslt_bu, do_cmp, 16, MO_8, TCG_COND_LTU)
+TRANS(vslt_hu, do_cmp, 16, MO_16, TCG_COND_LTU)
+TRANS(vslt_wu, do_cmp, 16, MO_32, TCG_COND_LTU)
+TRANS(vslt_du, do_cmp, 16, MO_64, TCG_COND_LTU)
+TRANS(vslti_bu, do_vslti_u, 16, MO_8)
+TRANS(vslti_hu, do_vslti_u, 16, MO_16)
+TRANS(vslti_wu, do_vslti_u, 16, MO_32)
+TRANS(vslti_du, do_vslti_u, 16, MO_64)
static bool trans_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a)
{
@@ -1915,6 +1915,49 @@ xvffintl_d_w 0111 01101001 11100 00100 ..... ..... @vv
xvffinth_d_w 0111 01101001 11100 00101 ..... ..... @vv
xvffint_s_l 0111 01010100 10000 ..... ..... ..... @vvv
+xvseq_b 0111 01000000 00000 ..... ..... ..... @vvv
+xvseq_h 0111 01000000 00001 ..... ..... ..... @vvv
+xvseq_w 0111 01000000 00010 ..... ..... ..... @vvv
+xvseq_d 0111 01000000 00011 ..... ..... ..... @vvv
+xvseqi_b 0111 01101000 00000 ..... ..... ..... @vv_i5
+xvseqi_h 0111 01101000 00001 ..... ..... ..... @vv_i5
+xvseqi_w 0111 01101000 00010 ..... ..... ..... @vv_i5
+xvseqi_d 0111 01101000 00011 ..... ..... ..... @vv_i5
+
+xvsle_b 0111 01000000 00100 ..... ..... ..... @vvv
+xvsle_h 0111 01000000 00101 ..... ..... ..... @vvv
+xvsle_w 0111 01000000 00110 ..... ..... ..... @vvv
+xvsle_d 0111 01000000 00111 ..... ..... ..... @vvv
+xvslei_b 0111 01101000 00100 ..... ..... ..... @vv_i5
+xvslei_h 0111 01101000 00101 ..... ..... ..... @vv_i5
+xvslei_w 0111 01101000 00110 ..... ..... ..... @vv_i5
+xvslei_d 0111 01101000 00111 ..... ..... ..... @vv_i5
+xvsle_bu 0111 01000000 01000 ..... ..... ..... @vvv
+xvsle_hu 0111 01000000 01001 ..... ..... ..... @vvv
+xvsle_wu 0111 01000000 01010 ..... ..... ..... @vvv
+xvsle_du 0111 01000000 01011 ..... ..... ..... @vvv
+xvslei_bu 0111 01101000 01000 ..... ..... ..... @vv_ui5
+xvslei_hu 0111 01101000 01001 ..... ..... ..... @vv_ui5
+xvslei_wu 0111 01101000 01010 ..... ..... ..... @vv_ui5
+xvslei_du 0111 01101000 01011 ..... ..... ..... @vv_ui5
+
+xvslt_b 0111 01000000 01100 ..... ..... ..... @vvv
+xvslt_h 0111 01000000 01101 ..... ..... ..... @vvv
+xvslt_w 0111 01000000 01110 ..... ..... ..... @vvv
+xvslt_d 0111 01000000 01111 ..... ..... ..... @vvv
+xvslti_b 0111 01101000 01100 ..... ..... ..... @vv_i5
+xvslti_h 0111 01101000 01101 ..... ..... ..... @vv_i5
+xvslti_w 0111 01101000 01110 ..... ..... ..... @vv_i5
+xvslti_d 0111 01101000 01111 ..... ..... ..... @vv_i5
+xvslt_bu 0111 01000000 10000 ..... ..... ..... @vvv
+xvslt_hu 0111 01000000 10001 ..... ..... ..... @vvv
+xvslt_wu 0111 01000000 10010 ..... ..... ..... @vvv
+xvslt_du 0111 01000000 10011 ..... ..... ..... @vvv
+xvslti_bu 0111 01101000 10000 ..... ..... ..... @vv_ui5
+xvslti_hu 0111 01101000 10001 ..... ..... ..... @vv_ui5
+xvslti_wu 0111 01101000 10010 ..... ..... ..... @vv_ui5
+xvslti_du 0111 01101000 10011 ..... ..... ..... @vv_ui5
+
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
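
One decode detail worth noting in the hunk above: the signed compare
immediates use @vv_i5, whose 5-bit field is sign-extended, while the
unsigned forms use @vv_ui5, which zero-extends, mirroring the existing
LSX patterns. The same all-ones bit pattern therefore decodes to
different immediates (hypothetical assembly, for illustration):

# imm field 0b11111: si5 decodes to -1, ui5 decodes to 31
# xvslti.b   xd, xj, -1     uses @vv_i5
# xvslti.bu  xd, xj, 31     uses @vv_ui5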
@@ -89,4 +89,8 @@
#define DO_BITSET(a, bit) (a | 1ull << bit)
#define DO_BITREV(a, bit) (a ^ (1ull << bit))
+#define VSEQ(a, b) (a == b ? -1 : 0)
+#define VSLE(a, b) (a <= b ? -1 : 0)
+#define VSLT(a, b) (a < b ? -1 : 0)
+
#endif /* LOONGARCH_VEC_H */
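
These macros yield full lane masks, not 0/1 booleans: a true comparison
produces -1 (all bits set in the destination lane), a false one produces
0, and signed versus unsigned behaviour comes entirely from the lane
type they are instantiated with. A standalone illustration of that
property (plain C, independent of QEMU):

    #include <assert.h>
    #include <stdint.h>

    #define VSLT(a, b) (a < b ? -1 : 0)

    int main(void)
    {
        int8_t  s = VSLT((int8_t)-1, (int8_t)1);      /* signed: -1 < 1 is true   */
        uint8_t u = VSLT((uint8_t)0xff, (uint8_t)1);  /* unsigned: 255 < 1 false  */
        assert(s == (int8_t)0xff);   /* true  -> all-ones mask */
        assert(u == 0);              /* false -> all-zeros     */
        return 0;
    }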
@@ -3091,19 +3091,16 @@ void HELPER(vffint_s_l)(CPULoongArchState *env,
*Vd = temp;
}
-#define VSEQ(a, b) (a == b ? -1 : 0)
-#define VSLE(a, b) (a <= b ? -1 : 0)
-#define VSLT(a, b) (a < b ? -1 : 0)
-
#define VCMPI(NAME, BIT, E, DO_OP) \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
{ \
- int i; \
+ int i, len; \
VReg *Vd = (VReg *)vd; \
VReg *Vj = (VReg *)vj; \
typedef __typeof(Vd->E(0)) TD; \
\
- for (i = 0; i < LSX_LEN/BIT; i++) { \
+ len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN; \
+ for (i = 0; i < len / BIT; i++) { \
Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
} \
}
This patch includes:
- XVSEQ[I].{B/H/W/D};
- XVSLE[I].{B/H/W/D}[U];
- XVSLT[I].{B/H/W/D}[U].

Signed-off-by: Song Gao <gaosong@loongson.cn>
---
 target/loongarch/disas.c                     |  43 +++
 target/loongarch/insn_trans/trans_lasx.c.inc |  43 +++
 target/loongarch/insn_trans/trans_lsx.c.inc  | 263 ++++++++++---------
 target/loongarch/insns.decode                |  43 +++
 target/loongarch/vec.h                       |   4 +
 target/loongarch/vec_helper.c                |   9 +-
 6 files changed, 269 insertions(+), 136 deletions(-)
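
With the lane count now derived from simd_oprsz(), a single VCMPI body
serves both vector widths. For a byte-sized instantiation such as
VCMPI(vseqi_b, 8, B, VSEQ) (the instantiation lines themselves sit
outside the hunk above), the preprocessor output after this patch is
roughly the following sketch:

    void helper_vseqi_b(void *vd, void *vj, uint64_t imm, uint32_t v)
    {
        int i, len;
        VReg *Vd = (VReg *)vd;
        VReg *Vj = (VReg *)vj;
        typedef __typeof(Vd->B(0)) TD;

        /* a 16-byte descriptor means LSX (128 bits), otherwise LASX (256) */
        len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;
        for (i = 0; i < len / 8; i++) {   /* BIT == 8: 16 or 32 byte lanes */
            Vd->B(i) = VSEQ(Vj->B(i), (TD)imm);
        }
    }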