
[v2,12/30] tcg/loongarch64: Implement not/and/or/xor/nor/andc/orc/eqv ops

Message ID 20210921201915.601245-13-git@xen0n.name
State New
Series LoongArch64 port of QEMU TCG

Commit Message

WANG Xuerui Sept. 21, 2021, 8:18 p.m. UTC
Signed-off-by: WANG Xuerui <git@xen0n.name>
---
 tcg/loongarch64/tcg-target-con-set.h |   2 +
 tcg/loongarch64/tcg-target.c.inc     | 101 +++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

Comments

Richard Henderson Sept. 22, 2021, 4:35 a.m. UTC | #1
On 9/21/21 1:18 PM, WANG Xuerui wrote:
> +    case INDEX_op_eqv_i32:
> +    case INDEX_op_eqv_i64:
> +        if (c2) {
> +            /* guaranteed to fit due to constraint */
> +            tcg_out_opc_xori(s, a0, a1, ~a2);
> +        } else {
> +            tcg_out_opc_nor(s, a0, a2, TCG_REG_ZERO);
> +            tcg_out_opc_xor(s, a0, a1, a0);
> +        }
> +        break;

You don't actually have eqv (xnor), so don't pretend that you do.  The middle-end will 
expand this as xor + not on its own.

Otherwise,
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~
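
For context, the middle-end expansion Richard refers to is the generic fallback in tcg/tcg-op.c: when a backend does not advertise eqv, tcg_gen_eqv_* emits xor followed by not on its own. Roughly, paraphrased from the tcg/tcg-op.c of that era (the exact code in any given tree may differ):

/* Paraphrase of the generic i32 helper; the i64 variant is analogous. */
void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        /* Backend claims a native eqv (xnor) instruction. */
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        /* Otherwise synthesize eqv as ~(a ^ b). */
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}
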
WANG Xuerui Sept. 22, 2021, 5:23 p.m. UTC | #2
Hi Richard,

On 9/22/21 12:35, Richard Henderson wrote:
> On 9/21/21 1:18 PM, WANG Xuerui wrote:
>> +    case INDEX_op_eqv_i32:
>> +    case INDEX_op_eqv_i64:
>> +        if (c2) {
>> +            /* guaranteed to fit due to constraint */
>> +            tcg_out_opc_xori(s, a0, a1, ~a2);
>> +        } else {
>> +            tcg_out_opc_nor(s, a0, a2, TCG_REG_ZERO);
>> +            tcg_out_opc_xor(s, a0, a1, a0);
>> +        }
>> +        break;
>
> You don't actually have eqv (xnor), so don't pretend that you do. The 
> middle-end will expand this as xor + not on its own.
Sure; I'll remove support for eqv in v3.
>
> Otherwise,
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
>
>
> r~
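
For v3, dropping eqv amounts to not advertising it to the middle-end, so the xor + not fallback above is used automatically. A minimal sketch under the usual TCG_TARGET_HAS_* convention (the actual v3 change may differ):

/* tcg/loongarch64/tcg-target.h: no native xnor on LoongArch, so leave
 * eqv unadvertised and let the middle-end expand it.
 */
#define TCG_TARGET_HAS_eqv_i32          0
#define TCG_TARGET_HAS_eqv_i64          0

Alongside this, the INDEX_op_eqv_* cases would be dropped from tcg_out_op() and tcg_target_op_def() in tcg-target.c.inc.
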

Patch

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index 7e459490ea..9ac24b8ad0 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -16,3 +16,5 @@ 
  */
 C_O0_I1(r)
 C_O1_I1(r, r)
+C_O1_I2(r, r, rC)
+C_O1_I2(r, r, rU)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index e000a31a06..89fdb6d7c3 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -374,6 +374,8 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 {
     TCGArg a0 = args[0];
     TCGArg a1 = args[1];
+    TCGArg a2 = args[2];
+    int c2 = const_args[2];
 
     switch (opc) {
     case INDEX_op_mb:
@@ -419,6 +421,79 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_opc_srai_d(s, a0, a1, 32);
         break;
 
+    case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
+        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
+        break;
+
+    case INDEX_op_nor_i32:
+    case INDEX_op_nor_i64:
+        if (c2) {
+            tcg_out_opc_ori(s, a0, a1, a2);
+            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
+        } else {
+            tcg_out_opc_nor(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+        if (c2) {
+            /* guaranteed to fit due to constraint */
+            tcg_out_opc_andi(s, a0, a1, ~a2);
+        } else {
+            tcg_out_opc_andn(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+        if (c2) {
+            /* guaranteed to fit due to constraint */
+            tcg_out_opc_ori(s, a0, a1, ~a2);
+        } else {
+            tcg_out_opc_orn(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_eqv_i32:
+    case INDEX_op_eqv_i64:
+        if (c2) {
+            /* guaranteed to fit due to constraint */
+            tcg_out_opc_xori(s, a0, a1, ~a2);
+        } else {
+            tcg_out_opc_nor(s, a0, a2, TCG_REG_ZERO);
+            tcg_out_opc_xor(s, a0, a1, a0);
+        }
+        break;
+
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+        if (c2) {
+            tcg_out_opc_andi(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_and(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
+        if (c2) {
+            tcg_out_opc_ori(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_or(s, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
+        if (c2) {
+            tcg_out_opc_xori(s, a0, a1, a2);
+        } else {
+            tcg_out_opc_xor(s, a0, a1, a2);
+        }
+        break;
+
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     default:
@@ -446,8 +521,34 @@  static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_extrh_i64_i32:
     case INDEX_op_ext_i32_i64:
+    case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
         return C_O1_I1(r, r);
 
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+    case INDEX_op_eqv_i32:
+    case INDEX_op_eqv_i64:
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+        /*
+         * LoongArch insns for these ops don't have reg-imm forms, but we
+         * can express using andi/ori/xori if ~constant satisfies
+         * TCG_CT_CONST_U12.
+         */
+        return C_O1_I2(r, r, rC);
+
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+    case INDEX_op_nor_i32:
+    case INDEX_op_nor_i64:
+    case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
+    case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
+        /* LoongArch reg-imm bitops have their imms ZERO-extended */
+        return C_O1_I2(r, r, rU);
+
     default:
         g_assert_not_reached();
     }
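
The two new constraint sets hinge on the 'U' and 'C' constraint letters: 'U' accepts constants that fit the zero-extended 12-bit immediate of andi/ori/xori directly, while 'C' accepts constants whose complement fits, so andc/orc (and eqv in this version) with a constant operand can be emitted as andi/ori/xori with ~a2, as tcg_out_op() does above. A rough sketch of that matching logic follows; the constant names and helper are illustrative only, since the real definitions live in the earlier constraint patches of this series:

/* Illustrative only: bit values and the helper name are assumptions,
 * not the series' actual tcg_target_const_match() implementation.
 */
#define TCG_CT_CONST_U12   0x100   /* val itself fits in 12 unsigned bits */
#define TCG_CT_CONST_C12   0x200   /* ~val fits in 12 unsigned bits */

static bool ct_const_match_sketch(int ct, int64_t val)
{
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        /* 'U': usable directly by andi/ori/xori, which zero-extend. */
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        /* 'C': the complement fits, so the op can be rewritten with ~val. */
        return true;
    }
    return false;
}
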