@@ -1571,6 +1571,15 @@ xvsat_hu 0111 01110010 10000 1 .... ..... ..... @vv_ui4
xvsat_wu 0111 01110010 10001 ..... ..... ..... @vv_ui5
xvsat_du 0111 01110010 1001 ...... ..... ..... @vv_ui6

+xvexth_h_b 0111 01101001 11101 11000 ..... ..... @vv
+xvexth_w_h 0111 01101001 11101 11001 ..... ..... @vv
+xvexth_d_w 0111 01101001 11101 11010 ..... ..... @vv
+xvexth_q_d 0111 01101001 11101 11011 ..... ..... @vv
+xvexth_hu_bu 0111 01101001 11101 11100 ..... ..... @vv
+xvexth_wu_hu 0111 01101001 11101 11101 ..... ..... @vv
+xvexth_du_wu 0111 01101001 11101 11110 ..... ..... @vv
+xvexth_qu_du 0111 01101001 11101 11111 ..... ..... @vv
+
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
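The eight new decode patterns reuse the existing @vv format, which carries only the vd and vj register numbers. As a rough sketch (assuming @vv places vj in bits 9:5 and vd in bits 4:0, as for the other @vv patterns in insns.decode), the fixed bits spelled out above amount to an encoder like:

/* Sketch only: assemble xvexth.h.b xd, xj from the pattern above;
 * 0111 01101001 11101 11000 shifted left by 10 is 0x769ee000. */
static inline uint32_t encode_xvexth_h_b(unsigned xd, unsigned xj)
{
    return 0x769ee000u | ((xj & 0x1f) << 5) | (xd & 0x1f);
}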
@@ -1988,6 +1988,15 @@ INSN_LASX(xvsat_hu, vv_i)
INSN_LASX(xvsat_wu, vv_i)
INSN_LASX(xvsat_du, vv_i)

+INSN_LASX(xvexth_h_b, vv)
+INSN_LASX(xvexth_w_h, vv)
+INSN_LASX(xvexth_d_w, vv)
+INSN_LASX(xvexth_q_d, vv)
+INSN_LASX(xvexth_hu_bu, vv)
+INSN_LASX(xvexth_wu_hu, vv)
+INSN_LASX(xvexth_du_wu, vv)
+INSN_LASX(xvexth_qu_du, vv)
+
INSN_LASX(xvreplgr2vr_b, vr)
INSN_LASX(xvreplgr2vr_h, vr)
INSN_LASX(xvreplgr2vr_w, vr)
@@ -716,32 +716,44 @@ VSAT_U(vsat_hu, 16, UH)
VSAT_U(vsat_wu, 32, UW)
VSAT_U(vsat_du, 64, UD)

-#define VEXTH(NAME, BIT, E1, E2) \
-void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT); \
- } \
+#define VEXTH(NAME, BIT, E1, E2) \
+void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
+{ \
+ int i, j, ofs; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ int oprsz = simd_oprsz(desc); \
+ \
+ ofs = LSX_LEN / BIT; \
+ for (i = 0; i < oprsz / 16; i++) { \
+ for (j = 0; j < ofs; j++) { \
+ Vd->E1(j + i * ofs) = Vj->E2(j + ofs + ofs * 2 * i); \
+ } \
+ } \
}

void HELPER(vexth_q_d)(void *vd, void *vj, uint32_t desc)
{
+ int i;
VReg *Vd = (VReg *)vd;
VReg *Vj = (VReg *)vj;
+    int oprsz = simd_oprsz(desc);

-    Vd->Q(0) = int128_makes64(Vj->D(1));
+ for (i = 0; i < oprsz / 16; i++) {
+ Vd->Q(i) = int128_makes64(Vj->D(2 * i + 1));
+ }
}

void HELPER(vexth_qu_du)(void *vd, void *vj, uint32_t desc)
{
+ int i;
VReg *Vd = (VReg *)vd;
VReg *Vj = (VReg *)vj;
+    int oprsz = simd_oprsz(desc);

-    Vd->Q(0) = int128_make64((uint64_t)Vj->D(1));
+ for (i = 0; i < oprsz / 16; i++) {
+ Vd->Q(i) = int128_make64(Vj->UD(2 * i + 1));
+ }
}

VEXTH(vexth_h_b, 16, H, B)
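With this rework, VEXTH walks oprsz / 16 lanes of 128 bits each, widening the high half of every source lane into the whole corresponding destination lane, so the same helpers serve both vexth.* (oprsz == 16) and xvexth.* (oprsz == 32). A minimal standalone sketch of the same index arithmetic for xvexth.h.b, using plain arrays instead of the VReg E1()/E2() accessors (hypothetical names, illustration only):

/* dst: 16 halfwords (256 bits); src: 32 bytes, i.e. two 128-bit lanes. */
static void sketch_xvexth_h_b(int16_t dst[16], const int8_t src[32])
{
    int ofs = 128 / 16;                   /* LSX_LEN / BIT: 8 elements per lane */
    for (int i = 0; i < 2; i++) {         /* oprsz / 16 == 2 lanes for LASX */
        for (int j = 0; j < ofs; j++) {
            /* high half of source lane i -> whole destination lane i */
            dst[j + i * ofs] = src[j + ofs + ofs * 2 * i];
        }
    }
}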
@@ -141,6 +141,10 @@ static bool gen_vv_ptr(DisasContext *ctx, arg_vv *a,
static bool gen_vv_vl(DisasContext *ctx, arg_vv *a, uint32_t oprsz,
gen_helper_gvec_2 *fn)
{
+ if (!check_vec(ctx, oprsz)) {
+ return true;
+ }
+
tcg_gen_gvec_2_ool(vec_full_offset(a->vd),
vec_full_offset(a->vj),
oprsz, ctx->vl / 8, 0, fn);
@@ -149,13 +153,14 @@ static bool gen_vv_vl(DisasContext *ctx, arg_vv *a, uint32_t oprsz,

static bool gen_vv(DisasContext *ctx, arg_vv *a, gen_helper_gvec_2 *fn)
{
- if (!check_vec(ctx, 16)) {
- return true;
- }
-
return gen_vv_vl(ctx, a, 16, fn);
}

+static bool gen_xx(DisasContext *ctx, arg_vv *a, gen_helper_gvec_2 *fn)
+{
+ return gen_vv_vl(ctx, a, 32, fn);
+}
+
static bool gen_vv_i_vl(DisasContext *ctx, arg_vv_i *a, uint32_t oprsz,
gen_helper_gvec_2i *fn)
{
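Hoisting check_vec() into gen_vv_vl() lets gen_vv (oprsz == 16) and the new gen_xx (oprsz == 32) share one body while each checks the vector width it actually uses; the oprsz passed here is what the helpers above recover with simd_oprsz(). A small sanity sketch of that round trip, assuming tcg's simd_desc()/simd_oprsz() pairing used underneath tcg_gen_gvec_2_ool():

#include <assert.h>
#include "tcg/tcg-gvec-desc.h"   /* simd_desc() / simd_oprsz() */

/* Illustration only: oprsz survives the trip through the gvec descriptor,
 * so the helper loops run once for LSX and twice for LASX. */
static void sketch_desc_roundtrip(void)
{
    assert(simd_oprsz(simd_desc(16, 16, 0)) == 16);   /* gen_vv */
    assert(simd_oprsz(simd_desc(32, 32, 0)) == 32);   /* gen_xx */
}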
@@ -3333,6 +3338,14 @@ TRANS(vexth_hu_bu, LSX, gen_vv, gen_helper_vexth_hu_bu)
TRANS(vexth_wu_hu, LSX, gen_vv, gen_helper_vexth_wu_hu)
TRANS(vexth_du_wu, LSX, gen_vv, gen_helper_vexth_du_wu)
TRANS(vexth_qu_du, LSX, gen_vv, gen_helper_vexth_qu_du)
+TRANS(xvexth_h_b, LASX, gen_xx, gen_helper_vexth_h_b)
+TRANS(xvexth_w_h, LASX, gen_xx, gen_helper_vexth_w_h)
+TRANS(xvexth_d_w, LASX, gen_xx, gen_helper_vexth_d_w)
+TRANS(xvexth_q_d, LASX, gen_xx, gen_helper_vexth_q_d)
+TRANS(xvexth_hu_bu, LASX, gen_xx, gen_helper_vexth_hu_bu)
+TRANS(xvexth_wu_hu, LASX, gen_xx, gen_helper_vexth_wu_hu)
+TRANS(xvexth_du_wu, LASX, gen_xx, gen_helper_vexth_du_wu)
+TRANS(xvexth_qu_du, LASX, gen_xx, gen_helper_vexth_qu_du)

static void gen_vsigncov(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
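Each TRANS() line above ties a decoded xvexth pattern to gen_xx and the corresponding shared helper. Roughly, and assuming the avail-gated TRANS macro shape this target defines in translate.h (not part of this diff), the first entry expands to something like:

/* Approximate expansion of TRANS(xvexth_h_b, LASX, gen_xx, gen_helper_vexth_h_b);
 * arg_xvexth_h_b is the decodetree-generated argument struct for @vv. */
static bool trans_xvexth_h_b(DisasContext *ctx, arg_xvexth_h_b *a)
{
    return avail_LASX(ctx) && gen_xx(ctx, a, gen_helper_vexth_h_b);
}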