[13/18] i386: Dot product AVX helper prep

Message ID 20220825221411.35122-14-pbonzini@redhat.com
State New, archived
Series target/i386: make most SSE helpers generic in the vector size

Commit Message

Paolo Bonzini Aug. 25, 2022, 10:14 p.m. UTC
From: Paul Brook <paul@nowt.org>

Make the dpps and dppd helpers AVX-ready.

I can't see any obvious reason why dppd shouldn't work on 256-bit ymm
registers, but both AMD and Intel agree that it's xmm-only.

Signed-off-by: Paul Brook <paul@nowt.org>
Message-Id: <20220424220204.2493824-17-paul@nowt.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 target/i386/ops_sse.h | 80 ++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 35 deletions(-)
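
The key functional change is that the dpps body now loops over the
register four floats at a time: VDPPS applies the 8-bit immediate to
each 128-bit lane independently, so the existing lane computation
simply runs once per lane. A rough scalar model of one lane
(hypothetical standalone code; the real helper goes through QEMU's
softfloat float32_mul/float32_add so that rounding state lives in
env->sse_status):

    #include <stdint.h>

    static void dpps_lane(float *d, const float *v, const float *s,
                          uint32_t mask)
    {
        float prod[4], sum;
        int j;

        /* imm8 bits 4..7 select which element products enter the sum. */
        for (j = 0; j < 4; j++) {
            prod[j] = (mask & (1 << (4 + j))) ? v[j] * s[j] : 0.0f;
        }
        /* Sum as (A+B)+(C+D), the association the helper preserves. */
        sum = (prod[0] + prod[1]) + (prod[2] + prod[3]);

        /* imm8 bits 0..3 select which destination elements get the sum. */
        for (j = 0; j < 4; j++) {
            d[j] = (mask & (1 << j)) ? sum : 0.0f;
        }
    }

The lane count falls out of the vector size: the loop bound 2 << SHIFT
is 4 floats for SHIFT == 1 (xmm) and 8 for SHIFT == 2 (ymm).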

Comments

Richard Henderson Aug. 26, 2022, 1:01 a.m. UTC | #1
On 8/25/22 15:14, Paolo Bonzini wrote:
> From: Paul Brook <paul@nowt.org>
> 
> Make the dpps and dppd helpers AVX-ready.
> 
> I can't see any obvious reason why dppd shouldn't work on 256-bit ymm
> registers, but both AMD and Intel agree that it's xmm-only.
> 
> Signed-off-by: Paul Brook <paul@nowt.org>
> Message-Id: <20220424220204.2493824-17-paul@nowt.org>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>   target/i386/ops_sse.h | 80 ++++++++++++++++++++++++-------------------
>   1 file changed, 45 insertions(+), 35 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~
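
A side note on the comment the patch keeps ("We must evaluate
(A+B)+(C+D), not ((A+B)+C)+D"): float32 addition is not associative,
so the helper has to keep the association the hardware specifies. A
quick standalone illustration (not part of the patch; host float with
the default round-to-nearest-even stands in for guest float32):

    #include <stdio.h>

    int main(void)
    {
        /* float has a 24-bit significand: 2^24 + 1 rounds back down to
         * 2^24 (ties-to-even), while 2^24 + 2 is exactly representable. */
        float a = 16777216.0f; /* 2^24 */
        float b = 1.0f, c = 1.0f, d = 1.0f;

        float serial   = ((a + b) + c) + d; /* each +1 rounded away: 16777216 */
        float pairwise = (a + b) + (c + d); /* 2^24 + 2 survives:    16777218 */

        printf("%.1f vs %.1f\n", serial, pairwise);
        return 0;
    }

With the serial association every 1.0f is absorbed by 2^24; pairing
the small terms first lets their sum survive the final rounding.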

Patch

diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 851a05d594..0493a26804 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -1942,55 +1942,64 @@  SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
 SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
 SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
 
-void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                               uint32_t mask)
 {
+    Reg *v = d;
     float32 prod1, prod2, temp2, temp3, temp4;
+    int i;
 
-    /*
-     * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
-     * to correctly round the intermediate results
-     */
-    if (mask & (1 << 4)) {
-        prod1 = float32_mul(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
-    } else {
-        prod1 = float32_zero;
-    }
-    if (mask & (1 << 5)) {
-        prod2 = float32_mul(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
-    } else {
-        prod2 = float32_zero;
-    }
-    temp2 = float32_add(prod1, prod2, &env->sse_status);
-    if (mask & (1 << 6)) {
-        prod1 = float32_mul(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
-    } else {
-        prod1 = float32_zero;
-    }
-    if (mask & (1 << 7)) {
-        prod2 = float32_mul(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
-    } else {
-        prod2 = float32_zero;
-    }
-    temp3 = float32_add(prod1, prod2, &env->sse_status);
-    temp4 = float32_add(temp2, temp3, &env->sse_status);
+    for (i = 0; i < 2 << SHIFT; i += 4) {
+        /*
+         * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
+         * to correctly round the intermediate results
+         */
+        if (mask & (1 << 4)) {
+            prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status);
+        } else {
+            prod1 = float32_zero;
+        }
+        if (mask & (1 << 5)) {
+            prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status);
+        } else {
+            prod2 = float32_zero;
+        }
+        temp2 = float32_add(prod1, prod2, &env->sse_status);
+        if (mask & (1 << 6)) {
+            prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2), &env->sse_status);
+        } else {
+            prod1 = float32_zero;
+        }
+        if (mask & (1 << 7)) {
+            prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3), &env->sse_status);
+        } else {
+            prod2 = float32_zero;
+        }
+        temp3 = float32_add(prod1, prod2, &env->sse_status);
+        temp4 = float32_add(temp2, temp3, &env->sse_status);
 
-    d->ZMM_S(0) = (mask & (1 << 0)) ? temp4 : float32_zero;
-    d->ZMM_S(1) = (mask & (1 << 1)) ? temp4 : float32_zero;
-    d->ZMM_S(2) = (mask & (1 << 2)) ? temp4 : float32_zero;
-    d->ZMM_S(3) = (mask & (1 << 3)) ? temp4 : float32_zero;
+        d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero;
+        d->ZMM_S(i+1) = (mask & (1 << 1)) ? temp4 : float32_zero;
+        d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero;
+        d->ZMM_S(i+3) = (mask & (1 << 3)) ? temp4 : float32_zero;
+    }
 }
 
-void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+#if SHIFT == 1
+/* Oddly, there is no ymm version of dppd */
+void glue(helper_dppd, SUFFIX)(CPUX86State *env,
+                               Reg *d, Reg *s, uint32_t mask)
 {
+    Reg *v = d;
     float64 prod1, prod2, temp2;
 
     if (mask & (1 << 4)) {
-        prod1 = float64_mul(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+        prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
     } else {
         prod1 = float64_zero;
     }
     if (mask & (1 << 5)) {
-        prod2 = float64_mul(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+        prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
     } else {
         prod2 = float64_zero;
     }
@@ -1998,6 +2007,7 @@  void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
     d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero;
     d->ZMM_D(1) = (mask & (1 << 1)) ? temp2 : float64_zero;
 }
+#endif
 
 void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                   uint32_t offset)
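
For readers new to ops_sse.h: the file is designed to be #included
more than once, with SHIFT and SUFFIX set by the includer, and QEMU's
glue() macro pastes the size suffix onto each helper name. That is
what lets one body serve xmm and, after this series, ymm, and why
dppd can be fenced off with "#if SHIFT == 1". A stripped-down sketch
of the pattern (hypothetical toy, not QEMU's actual build setup):

    #include <stdio.h>

    #define xglue(x, y) x ## y
    #define glue(x, y)  xglue(x, y)

    /* Pretend this "template" is a header included once per size,
     * with SHIFT and SUFFIX predefined by the includer. */
    #define SHIFT  1
    #define SUFFIX _xmm

    void glue(helper_dpps, SUFFIX)(void)   /* -> helper_dpps_xmm */
    {
        printf("dpps over %d floats\n", 2 << SHIFT); /* 4 for xmm */
    }

    #if SHIFT == 1
    /* Size-specific helpers, like dppd above, live behind a guard. */
    void glue(helper_dppd, SUFFIX)(void) { }
    #endif

    int main(void)
    {
        helper_dpps_xmm(); /* calling the pasted name */
        return 0;
    }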