@@ -1194,3 +1194,5 @@ DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsha2ch_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsha2cl_vv, void, ptr, ptr, ptr, env, i32)
@@ -924,3 +924,5 @@ vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
# *** RV64 Zvknh vector crypto extension ***
vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
@@ -43,3 +43,5 @@ static bool vsha_check(DisasContext *s, arg_rmrr *a)
}
GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check)
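+/* The compression instructions share the vsha2ms operand checks. */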
+GEN_VV_UNMASKED_TRANS(vsha2cl_vv, vsha_check)
+GEN_VV_UNMASKED_TRANS(vsha2ch_vv, vsha_check)
@@ -541,3 +541,156 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
+
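+/*
+ * SHA-2 "sum" functions (Sigma0/Sigma1 in FIPS 180-4): the 64-bit
+ * variants implement the SHA-512 rotations, the 32-bit ones SHA-256.
+ */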
+static inline uint64_t sum0_64(uint64_t x)
+{
+ return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
+}
+
+static inline uint32_t sum0_32(uint32_t x)
+{
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
+}
+
+static inline uint64_t sum1_64(uint64_t x)
+{
+ return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
+}
+
+static inline uint32_t sum1_32(uint32_t x)
+{
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
+}
+
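+/* SHA-2 choose and majority functions (Ch/Maj in FIPS 180-4). */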
+#define ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z)))
+
+#define maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
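+/*
+ * Perform two rounds of the SHA-512 compression function on one element
+ * group.  vs2 holds {a, b, e, f} and vd holds {c, d, g, h}, each with the
+ * first-named variable in the most significant element; vs1 supplies the
+ * two message schedule words.  The updated {a, b, e, f} is written to vd.
+ */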
+static void vsha2c_64(uint64_t *vs2, uint64_t *vd, uint64_t *vs1)
+{
+ uint64_t a = vs2[3], b = vs2[2], e = vs2[1], f = vs2[0];
+ uint64_t c = vd[3], d = vd[2], g = vd[1], h = vd[0];
+ uint64_t W0 = vs1[0], W1 = vs1[1];
+ uint64_t T1 = h + sum1_64(e) + ch(e, f, g) + W0;
+ uint64_t T2 = sum0_64(a) + maj(a, b, c);
+
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
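+ /* Second round, using the second message schedule word. */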
+ T1 = h + sum1_64(e) + ch(e, f, g) + W1;
+ T2 = sum0_64(a) + maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
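+ /* Write the updated {a, b, e, f} back to vd, a in the top element. */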
+ vd[0] = f;
+ vd[1] = e;
+ vd[2] = b;
+ vd[3] = a;
+}
+
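+/*
+ * Two rounds of the SHA-256 compression function; H4() maps logical
+ * element indices to their host-endian position within the register.
+ */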
+static void vsha2c_32(uint32_t *vs2, uint32_t *vd, uint32_t *vs1)
+{
+ uint32_t a = vs2[H4(3)], b = vs2[H4(2)], e = vs2[H4(1)], f = vs2[H4(0)];
+ uint32_t c = vd[H4(3)], d = vd[H4(2)], g = vd[H4(1)], h = vd[H4(0)];
+ uint32_t W0 = vs1[H4(0)], W1 = vs1[H4(1)];
+ uint32_t T1 = h + sum1_32(e) + ch(e, f, g) + W0;
+ uint32_t T2 = sum0_32(a) + maj(a, b, c);
+
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ T1 = h + sum1_32(e) + ch(e, f, g) + W1;
+ T2 = sum0_32(a) + maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ vd[H4(0)] = f;
+ vd[H4(1)] = e;
+ vd[H4(2)] = b;
+ vd[H4(3)] = a;
+}
+
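+/*
+ * vsha2ch consumes the two most significant message schedule words of
+ * each four-element vs1 group (hence the "+ 2" element offset below);
+ * vsha2cl uses the two least significant words.
+ */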
+void HELPER(vsha2ch_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t esz = 0;
+ uint32_t total_elems;
+ uint32_t vta = vext_vta(desc);
+
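+ /* vl must be a multiple of the element group size (4). */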
+ if (env->vl % 4 != 0) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
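+ /* One four-element group is processed per iteration. */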
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ if (sew == MO_64) {
+ esz = 8;
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
+ ((uint64_t *)vs1) + 4 * i + 2);
+ } else {
+ esz = 4;
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
+ ((uint32_t *)vs1) + 4 * i + 2);
+ }
+ }
+
+ /* set tail elements to 1s */
+ total_elems = vext_get_total_elems(env, desc, esz);
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}
+
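+/* As vsha2ch, but using the low two message schedule words of each group. */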
+void HELPER(vsha2cl_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t esz = 0;
+ uint32_t total_elems;
+ uint32_t vta = vext_vta(desc);
+
+ if (env->vl % 4 != 0) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ if (sew == MO_64) {
+ esz = 8;
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
+ (((uint64_t *)vs1) + 4 * i));
+ } else {
+ esz = 4;
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
+ (((uint32_t *)vs1) + 4 * i));
+ }
+ }
+
+ /* set tail elements to 1s */
+ total_elems = vext_get_total_elems(env, desc, esz);
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}