
[12/30] tcg/loongarch: Implement not/and/or/xor/nor/andc/orc ops

Message ID 20210920080451.408655-13-git@xen0n.name (mailing list archive)
State New, archived
Series 64-bit LoongArch port of QEMU TCG

Commit Message

WANG Xuerui Sept. 20, 2021, 8:04 a.m. UTC
Signed-off-by: WANG Xuerui <git@xen0n.name>
---
 tcg/loongarch/tcg-target-con-set.h |  2 +
 tcg/loongarch/tcg-target.c.inc     | 69 ++++++++++++++++++++++++++++++
 2 files changed, 71 insertions(+)

Comments

Richard Henderson Sept. 20, 2021, 2:54 p.m. UTC | #1
On 9/20/21 1:04 AM, WANG Xuerui wrote:
> +    case INDEX_op_andc_i32:
> +    case INDEX_op_andc_i64:
> +        tcg_out_opc_andn(s, a0, a1, a2);
> +        break;

You may want to add the constant case here, implemented with andi, with the constant 
inverted, similarly to the negation of the N constraint.  We do not (but probably should) 
canonicalize andc/orc/eqv constants to and/or/xor during optimization...

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~
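
For illustration only, a minimal sketch of the constant case suggested above, assuming a constraint (defined outside this patch) that only accepts immediates whose bitwise inverse fits andi's unsigned 12-bit field, so the inversion below cannot overflow:

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* the constraint guarantees ~a2 fits andi's 12-bit immediate */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;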

Patch

diff --git a/tcg/loongarch/tcg-target-con-set.h b/tcg/loongarch/tcg-target-con-set.h
index 7e459490ea..385f503552 100644
--- a/tcg/loongarch/tcg-target-con-set.h
+++ b/tcg/loongarch/tcg-target-con-set.h
@@ -16,3 +16,5 @@ 
  */
 C_O0_I1(r)
 C_O1_I1(r, r)
+C_O1_I2(r, r, r)
+C_O1_I2(r, r, rU)
diff --git a/tcg/loongarch/tcg-target.c.inc b/tcg/loongarch/tcg-target.c.inc
index 0ee389fdaa..e364b6c1da 100644
--- a/tcg/loongarch/tcg-target.c.inc
+++ b/tcg/loongarch/tcg-target.c.inc
@@ -372,6 +372,8 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 {
     TCGArg a0 = args[0];
     TCGArg a1 = args[1];
+    TCGArg a2 = args[2];
+    int c2 = const_args[2];
 
     switch (opc) {
     case INDEX_op_mb:
@@ -417,6 +419,53 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_opc_srai_d(s, a0, a1, 32);
         break;
 
+    case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
+        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
+        break;
+
+    case INDEX_op_nor_i32:
+    case INDEX_op_nor_i64:
+        tcg_out_opc_nor(s, a0, a1, a2);
+        break;
+
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+        tcg_out_opc_andn(s, a0, a1, a2);
+        break;
+
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+        tcg_out_opc_orn(s, a0, a1, a2);
+        break;
+
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+        if (c2) {
+            tcg_out_opc_andi(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_and(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
+        if (c2) {
+            tcg_out_opc_ori(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_or(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
+        if (c2) {
+            tcg_out_opc_xori(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_xor(s, a0, a1, a2);
+        }
+        break;
+
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     default:
@@ -444,8 +493,28 @@  static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_extrh_i64_i32:
     case INDEX_op_ext_i32_i64:
+    case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
         return C_O1_I1(r, r);
 
+    case INDEX_op_nor_i32:
+    case INDEX_op_andc_i32:
+    case INDEX_op_orc_i32:
+    case INDEX_op_nor_i64:
+    case INDEX_op_andc_i64:
+    case INDEX_op_orc_i64:
+        /* LoongArch insns for these ops don't have reg-imm forms */
+        return C_O1_I2(r, r, r);
+
+    case INDEX_op_and_i32:
+    case INDEX_op_or_i32:
+    case INDEX_op_xor_i32:
+    case INDEX_op_and_i64:
+    case INDEX_op_or_i64:
+    case INDEX_op_xor_i64:
+        /* LoongArch reg-imm bitops have their imms ZERO-extended */
+        return C_O1_I2(r, r, rU);
+
     default:
         g_assert_not_reached();
     }
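
For context: the rU constraint set used above relies on the 'U' constant flag introduced earlier in the series (not shown in this patch). Its matching logic presumably boils down to checking that the constant fits the zero-extended 12-bit immediate field of andi/ori/xori, roughly along these lines:

    /* Hypothetical sketch, not taken from this patch: the predicate the
     * 'U' constraint is expected to apply in tcg_target_const_match(). */
    static bool const_fits_u12(tcg_target_long val)
    {
        /* andi/ori/xori zero-extend their 12-bit immediate */
        return val >= 0 && val <= 0xfff;
    }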