@@ -1350,3 +1350,14 @@ DEF_HELPER_4(kmar64, i64, env, tl, tl, i64)
DEF_HELPER_4(kmsr64, i64, env, tl, tl, i64)
DEF_HELPER_4(ukmar64, i64, env, tl, tl, i64)
DEF_HELPER_4(ukmsr64, i64, env, tl, tl, i64)
+
+DEF_HELPER_4(smalbb, i64, env, tl, tl, i64)
+DEF_HELPER_4(smalbt, i64, env, tl, tl, i64)
+DEF_HELPER_4(smaltt, i64, env, tl, tl, i64)
+DEF_HELPER_4(smalda, i64, env, tl, tl, i64)
+DEF_HELPER_4(smalxda, i64, env, tl, tl, i64)
+DEF_HELPER_4(smalds, i64, env, tl, tl, i64)
+DEF_HELPER_4(smaldrs, i64, env, tl, tl, i64)
+DEF_HELPER_4(smalxds, i64, env, tl, tl, i64)
+DEF_HELPER_4(smslda, i64, env, tl, tl, i64)
+DEF_HELPER_4(smslxda, i64, env, tl, tl, i64)
@@ -953,3 +953,14 @@ kmar64 1001010 ..... ..... 001 ..... 1110111 @r
kmsr64 1001011 ..... ..... 001 ..... 1110111 @r
ukmar64 1011010 ..... ..... 001 ..... 1110111 @r
ukmsr64 1011011 ..... ..... 001 ..... 1110111 @r
+
+smalbb 1000100 ..... ..... 001 ..... 1110111 @r    # rd += B(rs1)*B(rs2); B/T = bottom/top 16-bit half
+smalbt 1001100 ..... ..... 001 ..... 1110111 @r    # rd += B(rs1)*T(rs2)
+smaltt 1010100 ..... ..... 001 ..... 1110111 @r    # rd += T(rs1)*T(rs2)
+smalda 1000110 ..... ..... 001 ..... 1110111 @r    # rd += B*B + T*T
+smalxda 1001110 ..... ..... 001 ..... 1110111 @r   # rd += B*T + T*B (crossed)
+smalds 1000101 ..... ..... 001 ..... 1110111 @r    # rd += T*T - B*B
+smaldrs 1001101 ..... ..... 001 ..... 1110111 @r   # rd += B*B - T*T (reversed subtract)
+smalxds 1010101 ..... ..... 001 ..... 1110111 @r   # rd += T(rs1)*B(rs2) - B(rs1)*T(rs2)
+smslda 1010110 ..... ..... 001 ..... 1110111 @r    # rd -= B*B + T*T
+smslxda 1011110 ..... ..... 001 ..... 1110111 @r   # rd -= B*T + T*B (crossed)
@@ -657,3 +657,15 @@ GEN_RVP_R_D64_ACC_OOL(kmar64);
GEN_RVP_R_D64_ACC_OOL(kmsr64);
GEN_RVP_R_D64_ACC_OOL(ukmar64);
GEN_RVP_R_D64_ACC_OOL(ukmsr64);
+
+/* Signed 16-bit multiply with 64-bit add/subtract (out-of-line helpers) */
+GEN_RVP_R_D64_ACC_OOL(smalbb);
+GEN_RVP_R_D64_ACC_OOL(smalbt);
+GEN_RVP_R_D64_ACC_OOL(smaltt);
+GEN_RVP_R_D64_ACC_OOL(smalda);
+GEN_RVP_R_D64_ACC_OOL(smalxda);
+GEN_RVP_R_D64_ACC_OOL(smalds);
+GEN_RVP_R_D64_ACC_OOL(smaldrs);
+GEN_RVP_R_D64_ACC_OOL(smalxds);
+GEN_RVP_R_D64_ACC_OOL(smslda);
+GEN_RVP_R_D64_ACC_OOL(smslxda);
@@ -2375,3 +2375,154 @@ static inline void do_ukmsr64(CPURISCVState *env, void *vd, void *va,
}
RVPR64_ACC(ukmsr64, 1, 4);
+
+/* Signed 16-bit Multiply with 64-bit Add/Subtract Instructions */
+static inline void do_smalbb(CPURISCVState *env, void *vd, void *va,
+                             void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rs1/rs2 as 16-bit lanes, H2() lane index */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i)] * b[H2(i)];    /* bottom(rs1) * bottom(rs2) */
+}
+
+RVPR64_ACC(smalbb, 2, 2);        /* NOTE(review): (2, 2) presumably step/size of the template -- confirm */
+
+static inline void do_smalbt(CPURISCVState *env, void *vd, void *va,
+                             void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rs1/rs2 as 16-bit lanes, H2() lane index */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i)] * b[H2(i + 1)];    /* bottom(rs1) * top(rs2) */
+}
+
+RVPR64_ACC(smalbt, 2, 2);
+
+static inline void do_smaltt(CPURISCVState *env, void *vd, void *va,
+                             void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rs1/rs2 as 16-bit lanes, H2() lane index */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i + 1)] * b[H2(i + 1)];    /* top(rs1) * top(rs2) */
+}
+
+RVPR64_ACC(smaltt, 2, 2);
+
+static inline void do_smalda(CPURISCVState *env, void *vd, void *va,
+                             void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd += B*B + T*T per lane pair */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i)] * b[H2(i)] + (int64_t)a[H2(i + 1)] * b[H2(i + 1)];
+}
+
+RVPR64_ACC(smalda, 2, 2);
+
+static inline void do_smalxda(CPURISCVState *env, void *vd, void *va,
+                              void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd += B*T + T*B (crossed halves) */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i)] * b[H2(i + 1)] + (int64_t)a[H2(i + 1)] * b[H2(i)];
+}
+
+RVPR64_ACC(smalxda, 2, 2);
+
+static inline void do_smalds(CPURISCVState *env, void *vd, void *va,
+                             void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd += T*T - B*B per lane pair */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i + 1)] * b[H2(i + 1)] - (int64_t)a[H2(i)] * b[H2(i)];
+}
+
+RVPR64_ACC(smalds, 2, 2);
+
+static inline void do_smaldrs(CPURISCVState *env, void *vd, void *va,
+                              void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd += B*B - T*T (reversed subtract) */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i)] * b[H2(i)] - (int64_t)a[H2(i + 1)] * b[H2(i + 1)];
+}
+
+RVPR64_ACC(smaldrs, 2, 2);
+
+static inline void do_smalxds(CPURISCVState *env, void *vd, void *va,
+                              void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd += T(rs1)*B(rs2) - B(rs1)*T(rs2) */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d += (int64_t)a[H2(i + 1)] * b[H2(i)] - (int64_t)a[H2(i)] * b[H2(i + 1)];
+}
+
+RVPR64_ACC(smalxds, 2, 2);
+
+static inline void do_smslda(CPURISCVState *env, void *vd, void *va,
+                             void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd -= B*B + T*T per lane pair */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d -= (int64_t)a[H2(i)] * b[H2(i)] + (int64_t)a[H2(i + 1)] * b[H2(i + 1)];
+}
+
+RVPR64_ACC(smslda, 2, 2);
+
+static inline void do_smslxda(CPURISCVState *env, void *vd, void *va,
+                              void *vb, void *vc, uint8_t i)
+{
+    int64_t *d = vd, *c = vc;    /* d: 64-bit result, c: incoming accumulator */
+    int16_t *a = va, *b = vb;    /* rd -= B*T + T*B (crossed halves) */
+
+    if (i == 0) {                /* first lane pair: seed result from rs3 */
+        *d = *c;
+    }
+
+    *d -= (int64_t)a[H2(i + 1)] * b[H2(i)] + (int64_t)a[H2(i)] * b[H2(i + 1)];
+}
+
+RVPR64_ACC(smslxda, 2, 2);
one or two 16x16 multiplies as operands for an add/subtract operation with another 64-bit operand. Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com> --- target/riscv/helper.h | 11 ++ target/riscv/insn32.decode | 11 ++ target/riscv/insn_trans/trans_rvp.c.inc | 12 ++ target/riscv/packed_helper.c | 151 ++++++++++++++++++++++++ 4 files changed, 185 insertions(+)