Patchwork [35/62] tcg-s390: Implement immediate XORs.

login
register
mail settings
Submitter Richard Henderson
Date May 27, 2010, 8:46 p.m.
Message ID <1274993204-30766-36-git-send-email-rth@twiddle.net>
Download mbox | patch
Permalink /patch/53823/
State New
Headers show

Comments

Richard Henderson - May 27, 2010, 8:46 p.m.
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/s390/tcg-target.c |   45 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 41 insertions(+), 4 deletions(-)

Patch

diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 1bc9b4c..ec8c84d 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -57,6 +57,8 @@  typedef enum S390Opcode {
     RIL_NILF    = 0xc00b,
     RIL_OIHF    = 0xc00c,
     RIL_OILF    = 0xc00d,
+    RIL_XIHF    = 0xc006,
+    RIL_XILF    = 0xc007,
 
     RI_AGHI     = 0xa70b,
     RI_AHI      = 0xa70a,
@@ -719,6 +721,33 @@  static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
     tgen64_ori(s, dest, val & 0xffffffff00000000ull);
 }
 
+static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
+{
+    tcg_target_long sval = val;
+
+    /* Zero-th, look for no-op.  */
+    if (val == 0) {
+        return;
+    }
+
+    /* First, look for 64-bit values for which it is better to load the
+       value first and perform the xor via registers.  This is true for
+       any 32-bit negative value, where the high 32-bits get flipped too.  */
+    if (sval < 0 && sval == (int32_t)sval) {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R13, sval);
+        tcg_out_insn(s, RRE, XGR, dest, TCG_REG_R13);
+        return;
+    }
+
+    /* Second, perform the xor by parts.  */
+    if (val & 0xffffffff) {
+        tcg_out_insn(s, RIL, XILF, dest, val);
+    }
+    if (val > 0xffffffff) {
+        tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
+    }
+}
+
 static void tgen32_cmp(TCGContext *s, TCGCond c, TCGReg r1, TCGReg r2)
 {
     if (c > TCG_COND_GT) {
@@ -1202,7 +1231,11 @@  static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_xor_i32:
-        tcg_out_insn(s, RR, XR, args[0], args[2]);
+        if (const_args[2]) {
+            tgen64_xori(s, args[0], args[2] & 0xffffffff);
+        } else {
+            tcg_out_insn(s, RR, XR, args[0], args[2]);
+        }
         break;
 
     case INDEX_op_and_i64:
@@ -1220,7 +1253,11 @@  static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_xor_i64:
-        tcg_out_insn(s, RRE, XGR, args[0], args[2]);
+        if (const_args[2]) {
+            tgen64_xori(s, args[0], args[2]);
+        } else {
+            tcg_out_insn(s, RRE, XGR, args[0], args[2]);
+        }
         break;
 
     case INDEX_op_neg_i32:
@@ -1490,7 +1527,7 @@  static const TCGTargetOpDef s390_op_defs[] = {
 
     { INDEX_op_and_i32, { "r", "0", "ri" } },
     { INDEX_op_or_i32, { "r", "0", "ri" } },
-    { INDEX_op_xor_i32, { "r", "0", "r" } },
+    { INDEX_op_xor_i32, { "r", "0", "ri" } },
     { INDEX_op_neg_i32, { "r", "r" } },
 
     { INDEX_op_shl_i32, { "r", "0", "Ri" } },
@@ -1551,7 +1588,7 @@  static const TCGTargetOpDef s390_op_defs[] = {
 
     { INDEX_op_and_i64, { "r", "0", "ri" } },
     { INDEX_op_or_i64, { "r", "0", "ri" } },
-    { INDEX_op_xor_i64, { "r", "0", "r" } },
+    { INDEX_op_xor_i64, { "r", "0", "ri" } },
     { INDEX_op_neg_i64, { "r", "r" } },
 
     { INDEX_op_shl_i64, { "r", "r", "Ri" } },