@@ -2103,6 +2103,22 @@ INSN_LASX(xvsrari_h, xx_i)
INSN_LASX(xvsrari_w, xx_i)
INSN_LASX(xvsrari_d, xx_i)
+INSN_LASX(xvsrln_b_h, xxx)
+INSN_LASX(xvsrln_h_w, xxx)
+INSN_LASX(xvsrln_w_d, xxx)
+INSN_LASX(xvsran_b_h, xxx)
+INSN_LASX(xvsran_h_w, xxx)
+INSN_LASX(xvsran_w_d, xxx)
+
+INSN_LASX(xvsrlni_b_h, xx_i)
+INSN_LASX(xvsrlni_h_w, xx_i)
+INSN_LASX(xvsrlni_w_d, xx_i)
+INSN_LASX(xvsrlni_d_q, xx_i)
+INSN_LASX(xvsrani_b_h, xx_i)
+INSN_LASX(xvsrani_h_w, xx_i)
+INSN_LASX(xvsrani_w_d, xx_i)
+INSN_LASX(xvsrani_d_q, xx_i)
+
INSN_LASX(xvreplgr2vr_b, xr)
INSN_LASX(xvreplgr2vr_h, xr)
INSN_LASX(xvreplgr2vr_w, xr)
@@ -958,3 +958,19 @@ DEF_HELPER_4(xvsrari_b, void, env, i32, i32, i32)
DEF_HELPER_4(xvsrari_h, void, env, i32, i32, i32)
DEF_HELPER_4(xvsrari_w, void, env, i32, i32, i32)
DEF_HELPER_4(xvsrari_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(xvsrln_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrln_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrln_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsran_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsran_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsran_w_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(xvsrlni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrlni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrlni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrlni_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrani_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrani_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrani_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(xvsrani_d_q, void, env, i32, i32, i32)
@@ -2052,6 +2052,22 @@ TRANS(xvsrari_h, gen_xx_i, gen_helper_xvsrari_h)
TRANS(xvsrari_w, gen_xx_i, gen_helper_xvsrari_w)
TRANS(xvsrari_d, gen_xx_i, gen_helper_xvsrari_d)
+TRANS(xvsrln_b_h, gen_xxx, gen_helper_xvsrln_b_h)
+TRANS(xvsrln_h_w, gen_xxx, gen_helper_xvsrln_h_w)
+TRANS(xvsrln_w_d, gen_xxx, gen_helper_xvsrln_w_d)
+TRANS(xvsran_b_h, gen_xxx, gen_helper_xvsran_b_h)
+TRANS(xvsran_h_w, gen_xxx, gen_helper_xvsran_h_w)
+TRANS(xvsran_w_d, gen_xxx, gen_helper_xvsran_w_d)
+
+TRANS(xvsrlni_b_h, gen_xx_i, gen_helper_xvsrlni_b_h)
+TRANS(xvsrlni_h_w, gen_xx_i, gen_helper_xvsrlni_h_w)
+TRANS(xvsrlni_w_d, gen_xx_i, gen_helper_xvsrlni_w_d)
+TRANS(xvsrlni_d_q, gen_xx_i, gen_helper_xvsrlni_d_q)
+TRANS(xvsrani_b_h, gen_xx_i, gen_helper_xvsrani_b_h)
+TRANS(xvsrani_h_w, gen_xx_i, gen_helper_xvsrani_h_w)
+TRANS(xvsrani_w_d, gen_xx_i, gen_helper_xvsrani_w_d)
+TRANS(xvsrani_d_q, gen_xx_i, gen_helper_xvsrani_d_q)
+
static bool gvec_dupx(DisasContext *ctx, arg_xr *a, MemOp mop)
{
TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
@@ -1320,6 +1320,7 @@ vstelm_b 0011 000110 .... ........ ..... ..... @vr_i8i4
@xx_ui4 .... ........ ..... . imm:4 xj:5 xd:5 &xx_i
@xx_ui5 .... ........ ..... imm:5 xj:5 xd:5 &xx_i
@xx_ui6 .... ........ .... imm:6 xj:5 xd:5 &xx_i
+@xx_ui7 .... ........ ... imm:7 xj:5 xd:5 &xx_i
@xx_ui8 .... ........ .. imm:8 xj:5 xd:5 &xx_i
xvadd_b 0111 01000000 10100 ..... ..... ..... @xxx
@@ -1700,6 +1701,22 @@ xvsrari_h 0111 01101010 10000 1 .... ..... ..... @xx_ui4
xvsrari_w 0111 01101010 10001 ..... ..... ..... @xx_ui5
xvsrari_d 0111 01101010 1001 ...... ..... ..... @xx_ui6
+xvsrln_b_h 0111 01001111 01001 ..... ..... ..... @xxx
+xvsrln_h_w 0111 01001111 01010 ..... ..... ..... @xxx
+xvsrln_w_d 0111 01001111 01011 ..... ..... ..... @xxx
+xvsran_b_h 0111 01001111 01101 ..... ..... ..... @xxx
+xvsran_h_w 0111 01001111 01110 ..... ..... ..... @xxx
+xvsran_w_d 0111 01001111 01111 ..... ..... ..... @xxx
+
+xvsrlni_b_h 0111 01110100 00000 1 .... ..... ..... @xx_ui4
+xvsrlni_h_w 0111 01110100 00001 ..... ..... ..... @xx_ui5
+xvsrlni_w_d 0111 01110100 0001 ...... ..... ..... @xx_ui6
+xvsrlni_d_q 0111 01110100 001 ....... ..... ..... @xx_ui7
+xvsrani_b_h 0111 01110101 10000 1 .... ..... ..... @xx_ui4
+xvsrani_h_w 0111 01110101 10001 ..... ..... ..... @xx_ui5
+xvsrani_w_d 0111 01110101 1001 ...... ..... ..... @xx_ui6
+xvsrani_d_q 0111 01110101 101 ....... ..... ..... @xx_ui7
+
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @xr
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @xr
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @xr
@@ -964,3 +964,131 @@ XVSRARI(xvsrari_b, 8, XB)
XVSRARI(xvsrari_h, 16, XH)
XVSRARI(xvsrari_w, 32, XW)
XVSRARI(xvsrari_d, 64, XD)
+
+#define XVSRLN(NAME, BIT, E1, E2) /* shift right logical + narrow */   \
+void HELPER(NAME)(CPULoongArchState *env,                              \
+                  uint32_t xd, uint32_t xj, uint32_t xk)               \
+{                                                                      \
+    int i, max;                                                        \
+    XReg *Xd = &(env->fpr[xd].xreg);                                   \
+    XReg *Xj = &(env->fpr[xj].xreg);                                   \
+    XReg *Xk = &(env->fpr[xk].xreg);                                   \
+                                                                       \
+    max = LASX_LEN / (BIT * 2); /* narrowed results per 128-bit lane */\
+    for (i = 0; i < max; i++) {                                        \
+        Xd->E1(i) = R_SHIFT(Xj->E2(i), (Xk->E2(i)) % BIT);             \
+        Xd->E1(i + max * 2) = R_SHIFT(Xj->E2(i + max), /* upper lane */\
+                                      Xk->E2(i + max) % BIT);          \
+    }                                                                  \
+    Xd->XD(1) = 0; /* clear the high 64 bits of the low lane */        \
+    Xd->XD(3) = 0; /* clear the high 64 bits of the high lane */       \
+}
+
+XVSRLN(xvsrln_b_h, 16, XB, UXH) /* unsigned E2 => R_SHIFT is logical */
+XVSRLN(xvsrln_h_w, 32, XH, UXW)
+XVSRLN(xvsrln_w_d, 64, XW, UXD)
+
+#define XVSRAN(NAME, BIT, E1, E2, E3) /* shift right + narrow; E2 is  \
+                                         the (signed) source accessor,\
+                                         E3 the shift-count accessor */\
+void HELPER(NAME)(CPULoongArchState *env,                              \
+                  uint32_t xd, uint32_t xj, uint32_t xk)               \
+{                                                                      \
+    int i, max;                                                        \
+    XReg *Xd = &(env->fpr[xd].xreg);                                   \
+    XReg *Xj = &(env->fpr[xj].xreg);                                   \
+    XReg *Xk = &(env->fpr[xk].xreg);                                   \
+                                                                       \
+    max = LASX_LEN / (BIT * 2); /* narrowed results per 128-bit lane */\
+    for (i = 0; i < max; i++) {                                        \
+        Xd->E1(i) = R_SHIFT(Xj->E2(i), (Xk->E3(i)) % BIT);             \
+        Xd->E1(i + max * 2) = R_SHIFT(Xj->E2(i + max), /* upper lane */\
+                                      Xk->E3(i + max) % BIT);          \
+    }                                                                  \
+    Xd->XD(1) = 0; /* clear the high 64 bits of the low lane */        \
+    Xd->XD(3) = 0; /* clear the high 64 bits of the high lane */       \
+}
+
+XVSRAN(xvsran_b_h, 16, XB, XH, UXH) /* signed E2 => arithmetic shift */
+XVSRAN(xvsran_h_w, 32, XH, XW, UXW)
+XVSRAN(xvsran_w_d, 64, XW, XD, UXD)
+
+#define XVSRLNI(NAME, BIT, E1, E2) /* immediate-shift logical narrow; \
+                                      imm range is fixed by the decode\
+                                      format (ui4/ui5/ui6) */          \
+void HELPER(NAME)(CPULoongArchState *env,                              \
+                  uint32_t xd, uint32_t xj, uint32_t imm)              \
+{                                                                      \
+    int i, max;                                                        \
+    XReg temp; /* staging copy: Xd is both a source and the dest */    \
+    XReg *Xd = &(env->fpr[xd].xreg);                                   \
+    XReg *Xj = &(env->fpr[xj].xreg);                                   \
+                                                                       \
+    temp.XQ(0) = int128_zero();                                        \
+    temp.XQ(1) = int128_zero();                                        \
+    max = LASX_LEN / (BIT * 2); /* narrowed results per 128-bit lane */\
+    for (i = 0; i < max; i++) {                                        \
+        temp.E1(i) = R_SHIFT(Xj->E2(i), imm); /* lane: Xj then Xd */   \
+        temp.E1(i + max) = R_SHIFT(Xd->E2(i), imm);                    \
+        temp.E1(i + max * 2) = R_SHIFT(Xj->E2(i + max), imm);          \
+        temp.E1(i + max * 3) = R_SHIFT(Xd->E2(i + max), imm);          \
+    }                                                                  \
+    *Xd = temp;                                                        \
+}
+
+void HELPER(xvsrlni_d_q)(CPULoongArchState *env, /* 128->64 logical */
+                         uint32_t xd, uint32_t xj, uint32_t imm)
+{
+    XReg temp; /* staging copy: Xd is both a source and the dest */
+    XReg *Xd = &(env->fpr[xd].xreg);
+    XReg *Xj = &(env->fpr[xj].xreg);
+
+    temp.XQ(0) = int128_zero();
+    temp.XQ(1) = int128_zero();
+    temp.XD(0) = int128_getlo(int128_urshift(Xj->XQ(0), imm % 128)); /* imm is ui7, so '% 128' is a redundant guard */
+    temp.XD(1) = int128_getlo(int128_urshift(Xd->XQ(0), imm % 128));
+    temp.XD(2) = int128_getlo(int128_urshift(Xj->XQ(1), imm % 128)); /* upper lane */
+    temp.XD(3) = int128_getlo(int128_urshift(Xd->XQ(1), imm % 128));
+    *Xd = temp;
+}
+
+XVSRLNI(xvsrlni_b_h, 16, XB, UXH) /* unsigned E2 => logical R_SHIFT */
+XVSRLNI(xvsrlni_h_w, 32, XH, UXW)
+XVSRLNI(xvsrlni_w_d, 64, XW, UXD)
+
+#define XVSRANI(NAME, BIT, E1, E2) /* immediate-shift narrow,         \
+                                      arithmetic variant: instantiated\
+                                      with signed E2 accessors */      \
+void HELPER(NAME)(CPULoongArchState *env,                              \
+                  uint32_t xd, uint32_t xj, uint32_t imm)              \
+{                                                                      \
+    int i, max;                                                        \
+    XReg temp; /* staging copy: Xd is both a source and the dest */    \
+    XReg *Xd = &(env->fpr[xd].xreg);                                   \
+    XReg *Xj = &(env->fpr[xj].xreg);                                   \
+                                                                       \
+    temp.XQ(0) = int128_zero();                                        \
+    temp.XQ(1) = int128_zero();                                        \
+    max = LASX_LEN / (BIT * 2); /* narrowed results per 128-bit lane */\
+    for (i = 0; i < max; i++) {                                        \
+        temp.E1(i) = R_SHIFT(Xj->E2(i), imm); /* lane: Xj then Xd */   \
+        temp.E1(i + max) = R_SHIFT(Xd->E2(i), imm);                    \
+        temp.E1(i + max * 2) = R_SHIFT(Xj->E2(i + max), imm);          \
+        temp.E1(i + max * 3) = R_SHIFT(Xd->E2(i + max), imm);          \
+    }                                                                  \
+    *Xd = temp;                                                        \
+}
+
+void HELPER(xvsrani_d_q)(CPULoongArchState *env, /* 128->64 narrow */
+                         uint32_t xd, uint32_t xj, uint32_t imm)
+{
+    XReg temp; /* staging copy: Xd is both a source and the dest */
+    XReg *Xd = &(env->fpr[xd].xreg);
+    XReg *Xj = &(env->fpr[xj].xreg);
+
+    temp.XQ(0) = int128_zero();
+    temp.XQ(1) = int128_zero();
+    temp.XD(0) = int128_getlo(int128_rshift(Xj->XQ(0), imm % 128)); /* int128_rshift: presumably sign-propagating, vs. urshift in xvsrlni_d_q */
+    temp.XD(1) = int128_getlo(int128_rshift(Xd->XQ(0), imm % 128)); /* imm is ui7, so '% 128' is a redundant guard */
+    temp.XD(2) = int128_getlo(int128_rshift(Xj->XQ(1), imm % 128)); /* upper lane */
+    temp.XD(3) = int128_getlo(int128_rshift(Xd->XQ(1), imm % 128));
+    *Xd = temp;
+}
+
+XVSRANI(xvsrani_b_h, 16, XB, XH) /* signed E2 => arithmetic R_SHIFT */
+XVSRANI(xvsrani_h_w, 32, XH, XW)
+XVSRANI(xvsrani_w_d, 64, XW, XD)
@@ -922,8 +922,6 @@ VSRARI(vsrari_h, 16, H)
VSRARI(vsrari_w, 32, W)
VSRARI(vsrari_d, 64, D)
-#define R_SHIFT(a, b) (a >> b)
-
#define VSRLN(NAME, BIT, T, E1, E2) \
void HELPER(NAME)(CPULoongArchState *env, \
uint32_t vd, uint32_t vj, uint32_t vk) \
@@ -75,6 +75,8 @@
#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)
+#define R_SHIFT(a, b) ((a) >> (b)) /* parenthesize args (CERT PRE01-C); sign of shift follows the type of a */
+
uint64_t do_vmskltz_b(int64_t val);
uint64_t do_vmskltz_h(int64_t val);
uint64_t do_vmskltz_w(int64_t val);
This patch includes:
- XVSRLN.{B.H/H.W/W.D};
- XVSRAN.{B.H/H.W/W.D};
- XVSRLNI.{B.H/H.W/W.D/D.Q};
- XVSRANI.{B.H/H.W/W.D/D.Q}.

Signed-off-by: Song Gao <gaosong@loongson.cn>
---
 target/loongarch/disas.c                     |  16 +++
 target/loongarch/helper.h                    |  16 +++
 target/loongarch/insn_trans/trans_lasx.c.inc |  16 +++
 target/loongarch/insns.decode                |  17 +++
 target/loongarch/lasx_helper.c               | 128 +++++++++++++++++++
 target/loongarch/lsx_helper.c                |   2 -
 target/loongarch/vec.h                       |   2 +
 7 files changed, 195 insertions(+), 2 deletions(-)