[v3,04/37] target/ppc: vmulh* instructions use gvec

Message ID 20220210123447.3933301-5-matheus.ferst@eldorado.org.br
State New, archived
Series target/ppc: PowerISA Vector/VSX instruction batch

Commit Message

Matheus K. Ferst Feb. 10, 2022, 12:34 p.m. UTC
From: "Lucas Mateus Castro (alqotel)" <lucas.castro@eldorado.org.br>

Change vmulhuw, vmulhud, vmulhsw, and vmulhsd to use gvec instructions.

Signed-off-by: Lucas Mateus Castro (alqotel) <lucas.araujo@eldorado.org.br>
Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
---
 target/ppc/helper.h                 |   8 +-
 target/ppc/int_helper.c             |   8 +-
 target/ppc/translate/vmx-impl.c.inc | 154 +++++++++++++++++++++++++++-
 3 files changed, 158 insertions(+), 12 deletions(-)

Comments

Richard Henderson Feb. 11, 2022, 3:51 a.m. UTC | #1
On 2/10/22 23:34, matheus.ferst@eldorado.org.br wrote:
> +static void do_vx_vmulhu_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
> +{
> +    TCGv_vec a1, b1, mask, w, k;
> +    unsigned bits;
> +    bits = (vece == MO_32) ? 16 : 32;
> +
> +    a1 = tcg_temp_new_vec_matching(t);
> +    b1 = tcg_temp_new_vec_matching(t);
> +    w  = tcg_temp_new_vec_matching(t);
> +    k  = tcg_temp_new_vec_matching(t);
> +    mask = tcg_temp_new_vec_matching(t);
> +
> +    tcg_gen_dupi_vec(vece, mask, (vece == MO_32) ? 0xFFFF : 0xFFFFFFFF);
> +    tcg_gen_and_vec(vece, a1, a, mask);
> +    tcg_gen_and_vec(vece, b1, b, mask);
> +    tcg_gen_mul_vec(vece, t, a1, b1);
> +    tcg_gen_shri_vec(vece, k, t, bits);
> +
> +    tcg_gen_shri_vec(vece, a1, a, bits);
> +    tcg_gen_mul_vec(vece, t, a1, b1);
> +    tcg_gen_add_vec(vece, t, t, k);
> +    tcg_gen_and_vec(vece, k, t, mask);
> +    tcg_gen_shri_vec(vece, w, t, bits);
> +
> +    tcg_gen_and_vec(vece, a1, a, mask);
> +    tcg_gen_shri_vec(vece, b1, b, bits);
> +    tcg_gen_mul_vec(vece, t, a1, b1);
> +    tcg_gen_add_vec(vece, t, t, k);
> +    tcg_gen_shri_vec(vece, k, t, bits);
> +
> +    tcg_gen_shri_vec(vece, a1, a, bits);
> +    tcg_gen_mul_vec(vece, t, a1, b1);
> +    tcg_gen_add_vec(vece, t, t, w);
> +    tcg_gen_add_vec(vece, t, t, k);

I don't think that you should decompose 4 high-part 32-bit multiplies into 4 32-bit multiplies plus lots of arithmetic. This is not a win. You're actually better off with pure integer arithmetic here.

You could instead widen these into 2 64-bit multiplies, plus some arithmetic. That's certainly closer to the break-even point.
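
[Editorial note: a minimal sketch of what such a widening expansion could look like, not code from this series. The helper name is hypothetical, and it assumes the host backend supports mul_vec/shri_vec at MO_64. Each 64-bit element holds two 32-bit lanes; the even lanes are masked and multiplied as 64-bit values, the odd lanes are shifted down first, so the high product word lands in the right lane of each result.]

static void do_mulhuw_via_64(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec e = tcg_temp_new_vec_matching(t);
    TCGv_vec o = tcg_temp_new_vec_matching(t);
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    TCGv_vec mask = tcg_temp_new_vec_matching(t);

    tcg_gen_dupi_vec(MO_64, mask, 0xffffffffull);

    /* Even 32-bit lanes: widen, multiply, move the high word to bits 31..0. */
    tcg_gen_and_vec(MO_64, e, a, mask);
    tcg_gen_and_vec(MO_64, x, b, mask);
    tcg_gen_mul_vec(MO_64, e, e, x);
    tcg_gen_shri_vec(MO_64, e, e, 32);

    /* Odd 32-bit lanes: the high product word already sits in bits 63..32. */
    tcg_gen_shri_vec(MO_64, o, a, 32);
    tcg_gen_shri_vec(MO_64, x, b, 32);
    tcg_gen_mul_vec(MO_64, o, o, x);
    tcg_gen_andc_vec(MO_64, o, o, mask);

    tcg_gen_or_vec(MO_64, t, e, o);

    tcg_temp_free_vec(e);
    tcg_temp_free_vec(o);
    tcg_temp_free_vec(x);
    tcg_temp_free_vec(mask);
}

[That is 2 multiplies plus five cheap shift/logical ops per pair of lanes, versus the 4 multiplies and roughly a dozen supporting ops in the quoted expansion.]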

> +        {
> +            .fniv = do_vx_vmulhu_vec,
> +            .fno  = gen_helper_VMULHUD,
> +            .opt_opc = vecop_list,
> +            .vece = MO_64
> +        },
> +    };

As for the two high-part 64-bit multiplies, I think that should definitely remain an integer operation.

You probably want to expand these with inline integer operations using .fni[48].
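
[Editorial note: an illustrative sketch of such a .fni8 expansion, not code from this series; the function names are hypothetical. tcg_gen_mulu2_i64/tcg_gen_muls2_i64 produce the full 128-bit product, and only the high half is kept, mirroring what the out-of-line helpers do with muls64/mulu64.]

static void gen_vmulhud_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 discard = tcg_temp_new_i64();

    /* Full 128-bit unsigned product; keep only the high 64 bits. */
    tcg_gen_mulu2_i64(discard, t, a, b);
    tcg_temp_free_i64(discard);
}

static void gen_vmulhsd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 discard = tcg_temp_new_i64();

    /* Signed variant of the same expansion. */
    tcg_gen_muls2_i64(discard, t, a, b);
    tcg_temp_free_i64(discard);
}

[These would be wired up with .fni8 = gen_vmulhud_i64 (resp. gen_vmulhsd_i64) in the MO_64 GVecGen3 entries, alongside the existing .fno fallback.]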

> +static void do_vx_vmulhs_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)

Very much likewise.


r~

Patch

diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 1125a1704d..92595a42df 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -207,10 +207,10 @@  DEF_HELPER_3(VMULOUB, void, avr, avr, avr)
 DEF_HELPER_3(VMULOUH, void, avr, avr, avr)
 DEF_HELPER_3(VMULOUW, void, avr, avr, avr)
 DEF_HELPER_3(VMULOUD, void, avr, avr, avr)
-DEF_HELPER_3(VMULHSW, void, avr, avr, avr)
-DEF_HELPER_3(VMULHUW, void, avr, avr, avr)
-DEF_HELPER_3(VMULHSD, void, avr, avr, avr)
-DEF_HELPER_3(VMULHUD, void, avr, avr, avr)
+DEF_HELPER_4(VMULHSW, void, avr, avr, avr, i32)
+DEF_HELPER_4(VMULHUW, void, avr, avr, avr, i32)
+DEF_HELPER_4(VMULHSD, void, avr, avr, avr, i32)
+DEF_HELPER_4(VMULHUD, void, avr, avr, avr, i32)
 DEF_HELPER_3(vslo, void, avr, avr, avr)
 DEF_HELPER_3(vsro, void, avr, avr, avr)
 DEF_HELPER_3(vsrv, void, avr, avr, avr)
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index e134162fdd..79cde68f19 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1200,7 +1200,7 @@  void helper_VMULOUD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     mulu64(&r->VsrD(1), &r->VsrD(0), a->VsrD(1), b->VsrD(1));
 }
 
-void helper_VMULHSW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VMULHSW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc)
 {
     int i;
 
@@ -1209,7 +1209,7 @@  void helper_VMULHSW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     }
 }
 
-void helper_VMULHUW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VMULHUW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc)
 {
     int i;
 
@@ -1219,7 +1219,7 @@  void helper_VMULHUW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     }
 }
 
-void helper_VMULHSD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VMULHSD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc)
 {
     uint64_t discard;
 
@@ -1227,7 +1227,7 @@  void helper_VMULHSD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     muls64(&discard, &r->u64[1], a->s64[1], b->s64[1]);
 }
 
-void helper_VMULHUD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VMULHUD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc)
 {
     uint64_t discard;
 
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 62d0642226..bed8df81c4 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -2126,10 +2126,156 @@  TRANS_FLAGS2(ISA310, VMULOSD, do_vx_helper, gen_helper_VMULOSD)
 TRANS_FLAGS2(ISA310, VMULEUD, do_vx_helper, gen_helper_VMULEUD)
 TRANS_FLAGS2(ISA310, VMULOUD, do_vx_helper, gen_helper_VMULOUD)
 
-TRANS_FLAGS2(ISA310, VMULHSW, do_vx_helper, gen_helper_VMULHSW)
-TRANS_FLAGS2(ISA310, VMULHSD, do_vx_helper, gen_helper_VMULHSD)
-TRANS_FLAGS2(ISA310, VMULHUW, do_vx_helper, gen_helper_VMULHUW)
-TRANS_FLAGS2(ISA310, VMULHUD, do_vx_helper, gen_helper_VMULHUD)
+static void do_vx_vmulhu_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec a1, b1, mask, w, k;
+    unsigned bits;
+    bits = (vece == MO_32) ? 16 : 32;
+
+    a1 = tcg_temp_new_vec_matching(t);
+    b1 = tcg_temp_new_vec_matching(t);
+    w  = tcg_temp_new_vec_matching(t);
+    k  = tcg_temp_new_vec_matching(t);
+    mask = tcg_temp_new_vec_matching(t);
+
+    tcg_gen_dupi_vec(vece, mask, (vece == MO_32) ? 0xFFFF : 0xFFFFFFFF);
+    tcg_gen_and_vec(vece, a1, a, mask);
+    tcg_gen_and_vec(vece, b1, b, mask);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_shri_vec(vece, k, t, bits);
+
+    tcg_gen_shri_vec(vece, a1, a, bits);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_add_vec(vece, t, t, k);
+    tcg_gen_and_vec(vece, k, t, mask);
+    tcg_gen_shri_vec(vece, w, t, bits);
+
+    tcg_gen_and_vec(vece, a1, a, mask);
+    tcg_gen_shri_vec(vece, b1, b, bits);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_add_vec(vece, t, t, k);
+    tcg_gen_shri_vec(vece, k, t, bits);
+
+    tcg_gen_shri_vec(vece, a1, a, bits);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_add_vec(vece, t, t, w);
+    tcg_gen_add_vec(vece, t, t, k);
+
+    tcg_temp_free_vec(a1);
+    tcg_temp_free_vec(b1);
+    tcg_temp_free_vec(w);
+    tcg_temp_free_vec(k);
+    tcg_temp_free_vec(mask);
+}
+
+static bool do_vx_mulhu(DisasContext *ctx, arg_VX *a, unsigned vece)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_mul_vec, INDEX_op_add_vec, INDEX_op_shri_vec, 0
+    };
+
+    static const GVecGen3 op[2] = {
+        {
+            .fniv = do_vx_vmulhu_vec,
+            .fno  = gen_helper_VMULHUW,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = do_vx_vmulhu_vec,
+            .fno  = gen_helper_VMULHUD,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+                    avr_full_offset(a->vrb), 16, 16, &op[vece - MO_32]);
+
+    return true;
+
+}
+
+static void do_vx_vmulhs_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec a1, b1, mask, w, k;
+    unsigned bits;
+    bits = (vece == MO_32) ? 16 : 32;
+
+    a1 = tcg_temp_new_vec_matching(t);
+    b1 = tcg_temp_new_vec_matching(t);
+    w  = tcg_temp_new_vec_matching(t);
+    k  = tcg_temp_new_vec_matching(t);
+    mask = tcg_temp_new_vec_matching(t);
+
+    tcg_gen_dupi_vec(vece, mask, (vece == MO_32) ? 0xFFFF : 0xFFFFFFFF);
+    tcg_gen_and_vec(vece, a1, a, mask);
+    tcg_gen_and_vec(vece, b1, b, mask);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_shri_vec(vece, k, t, bits);
+
+    tcg_gen_sari_vec(vece, a1, a, bits);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_add_vec(vece, t, t, k);
+    tcg_gen_and_vec(vece, k, t, mask);
+    tcg_gen_sari_vec(vece, w, t, bits);
+
+    tcg_gen_and_vec(vece, a1, a, mask);
+    tcg_gen_sari_vec(vece, b1, b, bits);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_add_vec(vece, t, t, k);
+    tcg_gen_sari_vec(vece, k, t, bits);
+
+    tcg_gen_sari_vec(vece, a1, a, bits);
+    tcg_gen_mul_vec(vece, t, a1, b1);
+    tcg_gen_add_vec(vece, t, t, w);
+    tcg_gen_add_vec(vece, t, t, k);
+
+    tcg_temp_free_vec(a1);
+    tcg_temp_free_vec(b1);
+    tcg_temp_free_vec(w);
+    tcg_temp_free_vec(k);
+    tcg_temp_free_vec(mask);
+}
+
+static bool do_vx_mulhs(DisasContext *ctx, arg_VX *a, unsigned vece)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+    REQUIRE_VECTOR(ctx);
+
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_mul_vec, INDEX_op_add_vec, INDEX_op_shri_vec,
+        INDEX_op_sari_vec, 0
+    };
+
+    static const GVecGen3 op[2] = {
+        {
+            .fniv = do_vx_vmulhs_vec,
+            .fno  = gen_helper_VMULHSW,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = do_vx_vmulhs_vec,
+            .fno  = gen_helper_VMULHSD,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+                    avr_full_offset(a->vrb), 16, 16, &op[vece - MO_32]);
+
+    return true;
+}
+
+TRANS(VMULHSW, do_vx_mulhs, MO_32)
+TRANS(VMULHSD, do_vx_mulhs, MO_64)
+TRANS(VMULHUW, do_vx_mulhu, MO_32)
+TRANS(VMULHUD, do_vx_mulhu, MO_64)
 
 #undef GEN_VR_LDX
 #undef GEN_VR_STX