diff --git a/target/riscv/helper.h b/target/riscv/helper.h
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1218,3 +1218,10 @@ DEF_HELPER_3(scmplt8, tl, env, tl, tl)
DEF_HELPER_3(scmple8, tl, env, tl, tl)
DEF_HELPER_3(ucmplt8, tl, env, tl, tl)
DEF_HELPER_3(ucmple8, tl, env, tl, tl)
+
+DEF_HELPER_3(smul16, i64, env, tl, tl)
+DEF_HELPER_3(smulx16, i64, env, tl, tl)
+DEF_HELPER_3(umul16, i64, env, tl, tl)
+DEF_HELPER_3(umulx16, i64, env, tl, tl)
+DEF_HELPER_3(khm16, tl, env, tl, tl)
+DEF_HELPER_3(khmx16, tl, env, tl, tl)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -821,3 +821,11 @@ scmplt8    0000111  ..... ..... 000 ..... 1110111 @r
scmple8    0001111  ..... ..... 000 ..... 1110111 @r
ucmplt8    0010111  ..... ..... 000 ..... 1110111 @r
ucmple8    0011111  ..... ..... 000 ..... 1110111 @r
+
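+# SIMD 16-bit multiply instructions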
+smul16     1010000  ..... ..... 000 ..... 1110111 @r
+smulx16    1010001  ..... ..... 000 ..... 1110111 @r
+umul16     1011000  ..... ..... 000 ..... 1110111 @r
+umulx16    1011001  ..... ..... 000 ..... 1110111 @r
+khm16      1000011  ..... ..... 000 ..... 1110111 @r
+khmx16     1001011  ..... ..... 000 ..... 1110111 @r
diff --git a/target/riscv/insn_trans/trans_rvp.c.inc b/target/riscv/insn_trans/trans_rvp.c.inc
--- a/target/riscv/insn_trans/trans_rvp.c.inc
+++ b/target/riscv/insn_trans/trans_rvp.c.inc
@@ -217,3 +217,83 @@ GEN_RVP_R_OOL(scmplt8);
GEN_RVP_R_OOL(scmple8);
GEN_RVP_R_OOL(ucmplt8);
GEN_RVP_R_OOL(ucmple8);
+
+/* SIMD 16-bit Multiply Instructions */
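+/*
+ * Write a 64-bit destination value back to the GPR file.  On RV32
+ * the low and high halves go to the register pair (rd, rd + 1); on
+ * RV64 the whole value fits in a single XLEN-wide register.
+ */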
+static void set_pair_regs(DisasContext *ctx, TCGv_i64 dst, int rd)
+{
+    TCGv t1, t2;
+
+    t1 = tcg_temp_new();
+    t2 = tcg_temp_new();
+
+    if (is_32bit(ctx)) {
+        TCGv_i32 lo, hi;
+
+        lo = tcg_temp_new_i32();
+        hi = tcg_temp_new_i32();
+        tcg_gen_extr_i64_i32(lo, hi, dst);
+
+        tcg_gen_ext_i32_tl(t1, lo);
+        tcg_gen_ext_i32_tl(t2, hi);
+
+        gen_set_gpr(rd, t1);
+        gen_set_gpr(rd + 1, t2);
+        tcg_temp_free_i32(lo);
+        tcg_temp_free_i32(hi);
+    } else {
+        tcg_gen_trunc_i64_tl(t1, dst);
+        gen_set_gpr(rd, t1);
+    }
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+}
+
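+/*
+ * Out-of-line translation for R-format instructions whose helper
+ * returns a 64-bit result: gate on RVP and the pair-operand
+ * feature, then hand both source GPRs to the helper and write the
+ * result back through set_pair_regs().
+ */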
+static inline bool
+r_d64_ool(DisasContext *ctx, arg_r *a,
+          void (*fn)(TCGv_i64, TCGv_ptr, TCGv, TCGv))
+{
+    TCGv t1, t2;
+    TCGv_i64 t3;
+
+    if (!has_ext(ctx, RVP) || !ctx->ext_psfoperand) {
+        return false;
+    }
+
+    t1 = tcg_temp_new();
+    t2 = tcg_temp_new();
+    t3 = tcg_temp_new_i64();
+
+    gen_get_gpr(t1, a->rs1);
+    gen_get_gpr(t2, a->rs2);
+    fn(t3, cpu_env, t1, t2);
+    set_pair_regs(ctx, t3, a->rd);
+
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+    tcg_temp_free_i64(t3);
+    return true;
+}
+
+#define GEN_RVP_R_D64_OOL(NAME)                            \
+static bool trans_##NAME(DisasContext *s, arg_r *a)        \
+{                                                          \
+    return r_d64_ool(s, a, gen_helper_##NAME);             \
+}
+
+GEN_RVP_R_D64_OOL(smul16);
+GEN_RVP_R_D64_OOL(smulx16);
+GEN_RVP_R_D64_OOL(umul16);
+GEN_RVP_R_D64_OOL(umulx16);
+GEN_RVP_R_OOL(khm16);
+GEN_RVP_R_OOL(khmx16);
diff --git a/target/riscv/packed_helper.c b/target/riscv/packed_helper.c
--- a/target/riscv/packed_helper.c
+++ b/target/riscv/packed_helper.c
@@ -723,3 +723,116 @@ static inline void do_ucmple8(CPURISCVState *env, void *vd, void *va,
}
RVPR(ucmple8, 1, 1);
+
+/* SIMD 16-bit Multiply Instructions */
+typedef void PackedFn3(CPURISCVState *, void *, void *, void *);
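+/*
+ * Shared body for helpers that pack a 64-bit result from two
+ * XLEN-sized source operands.
+ */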
+static inline uint64_t rvpr64(CPURISCVState *env, target_ulong a,
+                              target_ulong b, PackedFn3 *fn)
+{
+    uint64_t result;
+
+    fn(env, &result, &a, &b);
+    return result;
+}
+
+#define RVPR64(NAME)                                             \
+uint64_t HELPER(NAME)(CPURISCVState *env, target_ulong a,        \
+                      target_ulong b)                            \
+{                                                                \
+    return rvpr64(env, a, b, (PackedFn3 *)do_##NAME);            \
+}
+
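+/* smul16: two signed 16x16 -> 32-bit lane products */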
+static inline void do_smul16(CPURISCVState *env, void *vd, void *va, void *vb)
+{
+    int32_t *d = vd;
+    int16_t *a = va, *b = vb;
+    d[H4(0)] = (int32_t)a[H2(0)] * b[H2(0)];
+    d[H4(1)] = (int32_t)a[H2(1)] * b[H2(1)];
+}
+
+RVPR64(smul16);
+
+static inline void do_smulx16(CPURISCVState *env, void *vd, void *va, void *vb)
+{
+    int32_t *d = vd;
+    int16_t *a = va, *b = vb;
+    d[H4(0)] = (int32_t)a[H2(0)] * b[H2(1)];
+    d[H4(1)] = (int32_t)a[H2(1)] * b[H2(0)];
+}
+
+RVPR64(smulx16);
+
+static inline void do_umul16(CPURISCVState *env, void *vd, void *va,
+                             void *vb)
+{
+    uint32_t *d = vd;
+    uint16_t *a = va, *b = vb;
+    d[H4(0)] = (uint32_t)a[H2(0)] * b[H2(0)];
+    d[H4(1)] = (uint32_t)a[H2(1)] * b[H2(1)];
+}
+
+RVPR64(umul16);
+
+static inline void do_umulx16(CPURISCVState *env, void *vd, void *va,
+                              void *vb)
+{
+    uint32_t *d = vd;
+    uint16_t *a = va, *b = vb;
+    d[H4(0)] = (uint32_t)a[H2(0)] * b[H2(1)];
+    d[H4(1)] = (uint32_t)a[H2(1)] * b[H2(0)];
+}
+
+RVPR64(umulx16);
+
+static inline void do_khm16(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int16_t *d = vd, *a = va, *b = vb;
+
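+    /*
+     * INT16_MIN * INT16_MIN is the only Q15 product that overflows
+     * the >> 15 scaling; saturate to INT16_MAX and set vxsat.
+     */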
+    if (a[i] == INT16_MIN && b[i] == INT16_MIN) {
+        env->vxsat = 1;
+        d[i] = INT16_MAX;
+    } else {
+        d[i] = (int32_t)a[i] * b[i] >> 15;
+    }
+}
+
+RVPR(khm16, 1, 2);
+
+static inline void do_khmx16(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    int16_t *d = vd, *a = va, *b = vb;
+
+    /*
+     * t[x] = ra.H[x] s* rb.H[y];
+     * rt.H[x] = SAT.Q15(t[x] s>> 15);
+     *
+     * (RV32: (x,y)=(1,0),(0,1),
+     *  RV64: (x,y)=(3,2),(2,3),
+     *              (1,0),(0,1))
+     */
+    if (a[H2(i)] == INT16_MIN && b[H2(i + 1)] == INT16_MIN) {
+        env->vxsat = 1;
+        d[H2(i)] = INT16_MAX;
+    } else {
+        d[H2(i)] = (int32_t)a[H2(i)] * b[H2(i + 1)] >> 15;
+    }
+    if (a[H2(i + 1)] == INT16_MIN && b[H2(i)] == INT16_MIN) {
+        env->vxsat = 1;
+        d[H2(i + 1)] = INT16_MAX;
+    } else {
+        d[H2(i + 1)] = (int32_t)a[H2(i + 1)] * b[H2(i)] >> 15;
+    }
+}
+
+RVPR(khmx16, 2, 2);
There are 6 instructions: 16-bit signed and unsigned multiply, 16-bit
signed and unsigned crossed multiply, and Q15 signed saturating
multiply in straight and crossed forms.

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
 target/riscv/helper.h                   |   7 ++
 target/riscv/insn32.decode              |   8 ++
 target/riscv/insn_trans/trans_rvp.c.inc |  80 ++++++++++++++++++
 target/riscv/packed_helper.c            | 113 ++++++++++++++++++++++++++
 4 files changed, 208 insertions(+)