@@ -1889,9 +1889,9 @@ GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsleu_vi, 0, vmsleu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsgtu_vi, 0, vmsgtu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check)
/* Vector Integer Min/Max Instructions */
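Note: per the RVV spec, the 5-bit immediate of vmsleu.vi and vmsgtu.vi is
sign-extended even though the comparison itself is unsigned, which is why
the zero-extend argument to GEN_OPIVI_TRANS changes from 1 to 0 above.
A minimal standalone illustration of why the extension mode matters, not
QEMU code, using simm5 = -1:

#include <stdint.h>
#include <stdbool.h>

/* vmsleu-style compare against simm5 = -1 (bit pattern 0b11111):
 * sign-extension turns the immediate into all-ones (UINT64_MAX), so
 * the unsigned compare is always true, matching the spec's intent;
 * zero-extending the same 5 bits would give 31 instead. */
static bool msleu_simm5_neg1(uint64_t s2)
{
    uint64_t imm = (uint64_t)(int64_t)-1;  /* sign-extend the immediate */
    return s2 <= imm;                      /* always true */
}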
@@ -1541,26 +1541,29 @@ GEN_VEXT_SHIFT_VX(vnsra_wx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
#define DO_MSLE(N, M) (N <= M)
#define DO_MSGT(N, M) (N > M)
-#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP) \
-void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t vm = vext_vm(desc); \
- uint32_t vl = env->vl; \
- uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
- uint32_t i; \
- \
- for (i = 0; i < vl; i++) { \
- ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
- ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
- if (!vm && !vext_elem_mask(v0, i)) { \
- continue; \
- } \
- vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \
- } \
- for (; i < vlmax; i++) { \
- vext_set_elem_mask(vd, i, 0); \
- } \
+#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_max_elems(desc, sizeof(ETYPE), false); \
+ uint32_t vta = vext_vta(desc); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \
+ } \
+    if (vta) {               /* tail-agnostic: clear mask bits past vl */ \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, i, 0); \
+ } \
+ } \
}
GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t, H1, DO_MSEQ)
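Note: the helper now honors the tail policy: vlmax comes from the newer
vext_max_elems() instead of vext_maxsz()/sizeof(ETYPE), and the loop that
writes mask bits past vl only runs when the tail-agnostic bit is set; with
tail-undisturbed those bits are left as-is. A sketch of the descriptor
accessor this relies on, with an assumed bit position (the real layout
comes from the VDATA FIELD() definitions, read with extract32()):

#include <stdint.h>

/* Illustrative only: a vext_vta()-style accessor pulling a 1-bit
 * tail-agnostic flag out of the packed 32-bit descriptor. Bit 18
 * is an assumption for this sketch, not the actual VDATA layout. */
static inline uint32_t vext_vta(uint32_t desc)
{
    return (desc >> 18) & 1;
}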
@@ -1593,26 +1596,29 @@ GEN_VEXT_CMP_VV(vmsle_vv_h, int16_t, H2, DO_MSLE)
GEN_VEXT_CMP_VV(vmsle_vv_w, int32_t, H4, DO_MSLE)
GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE)
-#define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t vm = vext_vm(desc); \
- uint32_t vl = env->vl; \
- uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
- uint32_t i; \
- \
- for (i = 0; i < vl; i++) { \
- ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
- if (!vm && !vext_elem_mask(v0, i)) { \
- continue; \
- } \
- vext_set_elem_mask(vd, i, \
- DO_OP(s2, (ETYPE)(target_long)s1)); \
- } \
- for (; i < vlmax; i++) { \
- vext_set_elem_mask(vd, i, 0); \
- } \
+#define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_max_elems(desc, sizeof(ETYPE), false); \
+ uint32_t vta = vext_vta(desc); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, i, \
+ DO_OP(s2, (ETYPE)(target_long)s1)); \
+ } \
+    if (vta) {               /* tail-agnostic: clear mask bits past vl */ \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, i, 0); \
+ } \
+ } \
}
GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t, H1, DO_MSEQ)
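Note: the _vx form gets the same vta handling; the remaining wrinkle is
the scalar operand, which the (ETYPE)(target_long)s1 cast chain first
reinterprets at xlen and then converts to the element type, so the
byte-wide helpers compare against only the low 8 bits of rs1 (and a
64-bit ETYPE on a 32-bit target sees rs1 sign-extended). Standalone
illustration, not QEMU code, assuming a 64-bit target:

#include <stdint.h>

typedef int64_t target_long;   /* assumption: RV64, xlen = 64 */

/* The (ETYPE)(target_long)s1 conversion for ETYPE = uint8_t:
 * reinterpret the xlen-wide scalar as signed, then truncate,
 * keeping only the low byte for the element-wide compare. */
static uint8_t vx_scalar_u8(uint64_t s1)
{
    return (uint8_t)(target_long)s1;
}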