@@ -1384,3 +1384,13 @@ DEF_HELPER_4(kdmabb, tl, env, tl, tl, tl)
DEF_HELPER_4(kdmabt, tl, env, tl, tl, tl)
DEF_HELPER_4(kdmatt, tl, env, tl, tl, tl)
DEF_HELPER_2(kabsw, tl, env, tl)
+
+/* 32-bit Computation Instructions */
+DEF_HELPER_3(raddw, tl, env, tl, tl)
+DEF_HELPER_3(uraddw, tl, env, tl, tl)
+DEF_HELPER_3(rsubw, tl, env, tl, tl)
+DEF_HELPER_3(ursubw, tl, env, tl, tl)
+DEF_HELPER_3(maxw, tl, env, tl, tl)
+DEF_HELPER_3(minw, tl, env, tl, tl)
+DEF_HELPER_3(mulr64, i64, env, tl, tl)
+DEF_HELPER_3(mulsr64, i64, env, tl, tl)
@@ -988,3 +988,13 @@ kdmabb 1101001 ..... ..... 001 ..... 1110111 @r
kdmabt 1110001 ..... ..... 001 ..... 1110111 @r
kdmatt 1111001 ..... ..... 001 ..... 1110111 @r
kabsw 1010110 10100 ..... 000 ..... 1110111 @r2
+
+# 32-bit Computation Instructions
+raddw 0010000 ..... ..... 001 ..... 1110111 @r
+uraddw 0011000 ..... ..... 001 ..... 1110111 @r
+rsubw 0010001 ..... ..... 001 ..... 1110111 @r
+ursubw 0011001 ..... ..... 001 ..... 1110111 @r
+maxw 1111001 ..... ..... 000 ..... 1110111 @r
+minw 1111000 ..... ..... 000 ..... 1110111 @r
+mulr64 1111000 ..... ..... 001 ..... 1110111 @r
+mulsr64 1110000 ..... ..... 001 ..... 1110111 @r
@@ -698,3 +698,13 @@ GEN_RVP_R_ACC_OOL(kdmabb);
GEN_RVP_R_ACC_OOL(kdmabt);
GEN_RVP_R_ACC_OOL(kdmatt);
GEN_RVP_R2_OOL(kabsw);
+
+/* 32-bit Computation Instructions */
+GEN_RVP_R_OOL(raddw);
+GEN_RVP_R_OOL(uraddw);
+GEN_RVP_R_OOL(rsubw);
+GEN_RVP_R_OOL(ursubw);
+GEN_RVP_R_OOL(maxw);
+GEN_RVP_R_OOL(minw);
+GEN_RVP_R_D64_OOL(mulr64);
+GEN_RVP_R_D64_OOL(mulsr64);
@@ -2818,3 +2818,95 @@ static inline void do_kabsw(CPURISCVState *env, void *vd, void *va, uint8_t i)
}
RVPR2(kabsw, 2, 4);
+
+/* 32-bit Computation Instructions */
+static inline void do_raddw(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int32_t *a = va, *b = vb;
+    target_long *d = vd;
+
+    *d = hadd32(a[H4(i)], b[H4(i)]); /* signed halving add: (a + b) >> 1 */
+}
+
+RVPR(raddw, 2, 4);
+
+static inline void do_uraddw(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    uint32_t *a = va, *b = vb;
+    target_long *d = vd;
+
+    *d = (int32_t)haddu32(a[H4(i)], b[H4(i)]); /* unsigned halving add */
+}
+
+RVPR(uraddw, 2, 4);
+
+static inline void do_rsubw(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int32_t *a = va, *b = vb;
+    target_long *d = vd;
+
+    *d = hsub32(a[H4(i)], b[H4(i)]); /* signed halving sub: (a - b) >> 1 */
+}
+
+RVPR(rsubw, 2, 4);
+
+static inline void do_ursubw(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    uint32_t *a = va, *b = vb;
+    target_long *d = vd;
+
+    *d = (int32_t)hsubu64(a[H4(i)], b[H4(i)]); /* 64-bit sub keeps borrow */
+}
+
+RVPR(ursubw, 2, 4);
+
+static inline void do_maxw(CPURISCVState *env, void *vd, void *va,
+                           void *vb, uint8_t i)
+{
+    target_long *d = vd;
+    int32_t *a = va, *b = vb;
+
+    *d = (a[H4(i)] > b[H4(i)]) ? a[H4(i)] : b[H4(i)]; /* signed 32-bit max */
+}
+
+RVPR(maxw, 2, 4);
+
+static inline void do_minw(CPURISCVState *env, void *vd, void *va,
+                           void *vb, uint8_t i)
+{
+    target_long *d = vd;
+    int32_t *a = va, *b = vb;
+
+    *d = (a[H4(i)] < b[H4(i)]) ? a[H4(i)] : b[H4(i)]; /* signed 32-bit min */
+}
+
+RVPR(minw, 2, 4);
+
+static inline void do_mulr64(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    uint64_t *d = vd;
+    uint32_t *a = va, *b = vb;
+
+    *d = (uint64_t)a[H4(0)] * b[H4(0)]; /* unsigned 32x32->64, i unused */
+}
+
+RVPR64(mulr64);
+
+static inline void do_mulsr64(CPURISCVState *env, void *vd, void *va,
+                              void *vb, uint8_t i)
+{
+    int32_t *d = vd;
+    int64_t result;
+    int32_t *a = va, *b = vb;
+
+    result = (int64_t)a[H4(0)] * b[H4(0)]; /* signed 32x32->64, i unused */
+    d[H4(1)] = result >> 32; /* high word */
+    d[H4(0)] = result & UINT32_MAX; /* low word; H4 is host-endian aware */
+}
+
+RVPR64(mulsr64);
Add the 32-bit computation instructions: halving addition and subtraction (signed and unsigned), signed maximum and minimum, and 32x32 multiplies producing a 64-bit result. Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com> --- target/riscv/helper.h | 9 +++ target/riscv/insn32.decode | 9 +++ target/riscv/insn_trans/trans_rvp.c.inc | 10 +++ target/riscv/packed_helper.c | 92 +++++++++++++++++++++++++ 4 files changed, 120 insertions(+)