@@ -1089,3 +1089,6 @@ DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
@@ -545,6 +545,10 @@ vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
+# Vector widening ordered and unordered float reduction sum
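+# The '-' here is a don't-care bit, so this single pattern matches both the
+# unordered (vfwredsum.vs) and ordered (vfwredosum.vs) encodings.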
+vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm

vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
@@ -2351,3 +2351,10 @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check)
GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
GEN_OPFVV_TRANS(vfredmax_vs, reduction_check)
GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
+
+/* Vector Widening Floating-Point Reduction Instructions */
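+/*
+ * Expands to trans_vfwredsum_vs(), which applies reduction_check() and then
+ * dispatches on SEW to the vfwredsum_vs_h or vfwredsum_vs_w helper.
+ */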
+GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
@@ -4456,3 +4456,61 @@ GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq)
GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum, clearh)
GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl)
GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)
+
+/* Vector Widening Floating-Point Reduction Instructions */
+/* Unordered reduce: vd[0](2*SEW) = vs1[0](2*SEW) + sum(promote(vs2[*](SEW))) */
+void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+                            void *vs2, CPURISCVState *env, uint32_t desc)
+{
+    uint32_t mlen = vext_mlen(desc);
+    uint32_t vm = vext_vm(desc);
+    uint32_t vl = env->vl;
+    uint32_t i;
+    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
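+    /* The accumulator starts as vs1[0], which is already 2*SEW wide. */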
+    uint32_t s1 = *((uint32_t *)vs1 + H4(0));
+
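+    /* Widen each active element from SEW to 2*SEW and accumulate it. */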
+    for (i = 0; i < vl; i++) {
+        uint16_t s2 = *((uint16_t *)vs2 + H2(i));
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {
+            continue;
+        }
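+        /* ieee=true: s2 is IEEE half-precision, not ARM alternative format. */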
+        s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
+                         &env->fp_status);
+    }
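+    /* Write the scalar result to element 0 of vd and clear the tail. */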
+    *((uint32_t *)vd + H4(0)) = s1;
+    clearl(vd, 1, sizeof(uint32_t), tot);
+}
+
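+/* As above for SEW=32: widen float32 elements into a float64 accumulator. */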
+void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+                            void *vs2, CPURISCVState *env, uint32_t desc)
+{
+    uint32_t mlen = vext_mlen(desc);
+    uint32_t vm = vext_vm(desc);
+    uint32_t vl = env->vl;
+    uint32_t i;
+    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
+    uint64_t s1 = *((uint64_t *)vs1 + H8(0));
+
+    for (i = 0; i < vl; i++) {
+        uint32_t s2 = *((uint32_t *)vs2 + H4(i));
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {
+            continue;
+        }
+        s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
+                         &env->fp_status);
+    }
+    *((uint64_t *)vd + H8(0)) = s1;
+    clearq(vd, 1, sizeof(uint64_t), tot);
+}
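+
+/*
+ * Usage sketch (hypothetical register choices): with SEW=16,
+ *     vfwredsum.vs v3, v2, v1
+ * computes v3[0] = v1[0] + sum(widen(v2[0..vl-1])), where v1[0] and v3[0]
+ * are read and written as float32 (2*SEW).
+ */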