
[v2,04/14] tcg/loongarch64: Lower add/sub_vec to vadd/vsub

Message ID 20230901093258.942357-5-c@jia.je
State New, archived
Series Lower TCG vector ops to LSX

Commit Message

Jiajie Chen Sept. 1, 2023, 9:30 a.m. UTC
Lower the following ops:

- add_vec
- sub_vec

Signed-off-by: Jiajie Chen <c@jia.je>
---
 tcg/loongarch64/tcg-target-con-set.h |  1 +
 tcg/loongarch64/tcg-target.c.inc     | 58 ++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+)
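
For context, a minimal sketch of how a target front end reaches this lowering. tcg_gen_add_vec(), tcg_gen_sub_vec() and tcg_constant_vec() are the generic TCG vector helpers; the wrapper function, element size and constant value below are illustrative only and assume the snippet is built inside QEMU with the usual TCG headers:

    /* Illustrative only: a 32-bit-element vector add with a small constant.
     * With this patch the constant operand can satisfy the new "wi"
     * constraint and be folded into vaddi.wu; out-of-range constants take
     * the dupi + vadd.w fallback in tcg_out_vec_op().
     */
    static void gen_example_add_vec(TCGv_vec dst, TCGv_vec src)
    {
        TCGv_vec five = tcg_constant_vec(TCG_TYPE_V128, MO_32, 5);

        tcg_gen_add_vec(MO_32, dst, src, five);
    }

tcg_gen_sub_vec() takes the same route through the new INDEX_op_sub_vec case.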

Comments

Richard Henderson Sept. 1, 2023, 5:58 p.m. UTC | #1
On 9/1/23 02:30, Jiajie Chen wrote:
> Lower the following ops:
> 
> - add_vec
> - sub_vec
> 
> Signed-off-by: Jiajie Chen <c@jia.je>
> ---
>   tcg/loongarch64/tcg-target-con-set.h |  1 +
>   tcg/loongarch64/tcg-target.c.inc     | 58 ++++++++++++++++++++++++++++
>   2 files changed, 59 insertions(+)
> 
> diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
> index d04916db25..eaa015e813 100644
> --- a/tcg/loongarch64/tcg-target-con-set.h
> +++ b/tcg/loongarch64/tcg-target-con-set.h
> @@ -31,5 +31,6 @@ C_O1_I2(r, 0, rZ)
>   C_O1_I2(r, rZ, ri)
>   C_O1_I2(r, rZ, rJ)
>   C_O1_I2(r, rZ, rZ)
> +C_O1_I2(w, w, wi)
>   C_O1_I2(w, w, wJ)

Similar comment with respect to the constraint.  I think you may be right that we need to 
improve constant handling for vectors.  I'm willing to work with you to improve that as a 
follow-up.


Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~
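
To make the constant handling concrete, here is a standalone sketch (plain C, not QEMU code; the sign-extension helper is a simplified stand-in for QEMU's sextract64() and the returned strings are only labels) of which constants the wi path folds into the unsigned 5-bit vaddi/vsubi immediates and which still need the dupi fallback:

    #include <inttypes.h>
    #include <stdio.h>

    /* Stand-in for QEMU's sextract64(): sign-extend the low 'len' bits. */
    static int64_t sext(uint64_t value, int len)
    {
        return (int64_t)(value << (64 - len)) >> (64 - len);
    }

    /* Mirror the add_vec constant path from tcg_out_vec_op() below. */
    static const char *pick_add_vec_form(uint64_t a2, unsigned vece)
    {
        int64_t value = sext(a2, 8 << vece);

        if (value >= 0 && value <= 0x1f) {
            return "vaddi.<ew>u vd, vj, value";    /* fits uk5 */
        } else if (value >= -0x1f && value < 0) {
            return "vsubi.<ew>u vd, vj, -value";   /* fits uk5 after negation */
        }
        return "dupi + vadd.<ew> vd, vj, vtmp";    /* needs a register */
    }

    int main(void)
    {
        printf("0xff, vece=0 -> %s\n", pick_add_vec_form(0xff, 0));
        printf("32,   vece=2 -> %s\n", pick_add_vec_form(32, 2));
        return 0;
    }

A byte constant of 0xff sign-extends to -1 and so is emitted as vsubi.bu with immediate 1, while anything outside [-31, 31] is materialised with dupi and uses the register-register vadd/vsub.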

Patch

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index d04916db25..eaa015e813 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -31,5 +31,6 @@  C_O1_I2(r, 0, rZ)
 C_O1_I2(r, rZ, ri)
 C_O1_I2(r, rZ, rJ)
 C_O1_I2(r, rZ, rZ)
+C_O1_I2(w, w, wi)
 C_O1_I2(w, w, wJ)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 18fe5fc148..555080f2b0 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1641,6 +1641,18 @@  static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
     };
     LoongArchInsn insn;
+    static const LoongArchInsn add_vec_insn[4] = {
+        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
+    };
+    static const LoongArchInsn add_vec_imm_insn[4] = {
+        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
+    };
+    static const LoongArchInsn sub_vec_insn[4] = {
+        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
+    };
+    static const LoongArchInsn sub_vec_imm_insn[4] = {
+        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
+    };
 
     a0 = args[0];
     a1 = args[1];
@@ -1707,6 +1719,46 @@  static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         }
         tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
         break;
+    case INDEX_op_add_vec:
+        if (const_args[2]) {
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            /* Try vaddi/vsubi */
+            if (0 <= value && value <= 0x1f) {
+                tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
+                                                 a1, value));
+                break;
+            } else if (-0x1f <= value && value < 0) {
+                tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
+                                                 a1, -value));
+                break;
+            }
+
+            /* Fallback to dupi + vadd */
+            tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+            a2 = temp_vec;
+        }
+        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_sub_vec:
+        if (const_args[2]) {
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            /* Try vaddi/vsubi */
+            if (0 <= value && value <= 0x1f) {
+                tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
+                                                 a1, value));
+                break;
+            } else if (-0x1f <= value && value < 0) {
+                tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
+                                                 a1, -value));
+                break;
+            }
+
+            /* Fallback to dupi + vsub */
+            tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+            a2 = temp_vec;
+        }
+        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -1723,6 +1775,8 @@  int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_dup_vec:
     case INDEX_op_dupm_vec:
     case INDEX_op_cmp_vec:
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
         return 1;
     default:
         return 0;
@@ -1887,6 +1941,10 @@  static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_cmp_vec:
         return C_O1_I2(w, w, wJ);
 
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
+        return C_O1_I2(w, w, wi);
+
     default:
         g_assert_not_reached();
     }