Patchwork [v3,07/29] tcg-aarch64: Introduce tcg_fmt_* functions

login
register
mail settings
Submitter Richard Henderson
Date Sept. 2, 2013, 5:54 p.m.
Message ID <1378144503-15808-8-git-send-email-rth@twiddle.net>
Download mbox | patch
Permalink /patch/272051/
State New
Headers show

Comments

Richard Henderson - Sept. 2, 2013, 5:54 p.m.
Now that we've converted opcode fields to pre-shifted insns, we
can merge the implementation of arithmetic and shift insns.

Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/aarch64/tcg-target.c | 78 +++++++++++++++++++++++-------------------------
 1 file changed, 38 insertions(+), 40 deletions(-)

Patch

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index de97fbd..e2f3d1c 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -297,6 +297,30 @@  static inline uint32_t tcg_in32(TCGContext *s)
     return v;
 }
 
+/*
+ * Encode various formats.  Note that since the architecture document is
+ * still private, these names are made up.
+ */
+
+/* Emit a three-register insn: Rd = Rn <op> Rm.  EXT selects the 64-bit
+   (sf = 1) form.  Cast before shifting: a bare "ext << 31" promotes the
+   bool to signed int and shifts a 1 into the sign bit, which is
+   undefined behaviour in C.  */
+static inline void tcg_fmt_Rdnm(TCGContext *s, AArch64Insn insn, bool ext,
+                                TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    tcg_out32(s, insn | (uint32_t)ext << 31 | rm << 16 | rn << 5 | rd);
+}
+
+/* As tcg_fmt_Rdnm, but with an optionally shifted second source
+   operand.  SHIFT_IMM > 0 encodes Rm shifted right (shift-type field
+   at bit 22 set, amount in bits 10..15); SHIFT_IMM < 0 encodes Rm
+   shifted left by -SHIFT_IMM; 0 means no shift.  The (uint32_t) cast
+   avoids shifting a promoted signed int into the sign bit (UB).  */
+static inline void tcg_fmt_Rdnm_shift(TCGContext *s, AArch64Insn insn,
+                                      bool ext, TCGReg rd, TCGReg rn,
+                                      TCGReg rm, int shift_imm)
+{
+    unsigned int shift;
+    if (shift_imm > 0) {
+        shift = shift_imm << 10 | 1 << 22;
+    } else {
+        /* shift_imm == 0 falls through here and encodes "no shift".  */
+        shift = (-shift_imm) << 10;
+    }
+    tcg_out32(s, insn | (uint32_t)ext << 31 | shift | rm << 16 | rn << 5 | rd);
+}
+
 static inline void tcg_out_ldst_9(TCGContext *s,
                                   enum aarch64_ldst_op_data op_data,
                                   enum aarch64_ldst_op_type op_type,
@@ -438,23 +462,6 @@  static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                  arg, arg1, arg2);
 }
 
-static inline void tcg_out_arith(TCGContext *s, AArch64Insn insn,
-                                 bool ext, TCGReg rd, TCGReg rn, TCGReg rm,
-                                 int shift_imm)
-{
-    /* Using shifted register arithmetic operations */
-    /* if extended register operation (64bit) just OR with 0x80 << 24 */
-    unsigned int shift, base = insn | (ext ? 0x80000000 : 0);
-    if (shift_imm == 0) {
-        shift = 0;
-    } else if (shift_imm > 0) {
-        shift = shift_imm << 10 | 1 << 22;
-    } else /* (shift_imm < 0) */ {
-        shift = (-shift_imm) << 10;
-    }
-    tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
-}
-
 static inline void tcg_out_mul(TCGContext *s, bool ext,
                                TCGReg rd, TCGReg rn, TCGReg rm)
 {
@@ -463,15 +470,6 @@  static inline void tcg_out_mul(TCGContext *s, bool ext,
     tcg_out32(s, base | rm << 16 | rn << 5 | rd);
 }
 
-static inline void tcg_out_shiftrot_reg(TCGContext *s,
-                                        AArch64Insn insn, bool ext,
-                                        TCGReg rd, TCGReg rn, TCGReg rm)
-{
-    /* using 2-source data processing instructions 0x1ac02000 */
-    unsigned int base = insn | (ext ? 0x80000000 : 0);
-    tcg_out32(s, base | rm << 16 | rn << 5 | rd);
-}
-
 static inline void tcg_out_ubfm(TCGContext *s, bool ext, TCGReg rd, TCGReg rn,
                                 unsigned int a, unsigned int b)
 {
@@ -539,7 +537,7 @@  static inline void tcg_out_cmp(TCGContext *s, bool ext, TCGReg rn, TCGReg rm,
                                int shift_imm)
 {
     /* Using CMP alias SUBS wzr, Wn, Wm */
-    tcg_out_arith(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
+    tcg_fmt_Rdnm_shift(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
 }
 
 static inline void tcg_out_cset(TCGContext *s, bool ext, TCGReg rd, TCGCond c)
@@ -896,8 +894,8 @@  static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
     tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
     /* Merge the tlb index contribution into X2.
        X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
-    tcg_out_arith(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
-                  TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
+    tcg_fmt_Rdnm_shift(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
+                       TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
     /* Merge "low bits" from tlb offset, load the tlb comparator into X0.
        X0 = load [X2 + (tlb_offset & 0x000fff)] */
     tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? LDST_64 : LDST_32,
@@ -1173,27 +1171,27 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_add_i64:
     case INDEX_op_add_i32:
-        tcg_out_arith(s, INSN_ADD, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_ADD, ext, a0, a1, a2);
         break;
 
     case INDEX_op_sub_i64:
     case INDEX_op_sub_i32:
-        tcg_out_arith(s, INSN_SUB, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_SUB, ext, a0, a1, a2);
         break;
 
     case INDEX_op_and_i64:
     case INDEX_op_and_i32:
-        tcg_out_arith(s, INSN_AND, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_AND, ext, a0, a1, a2);
         break;
 
     case INDEX_op_or_i64:
     case INDEX_op_or_i32:
-        tcg_out_arith(s, INSN_ORR, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_ORR, ext, a0, a1, a2);
         break;
 
     case INDEX_op_xor_i64:
     case INDEX_op_xor_i32:
-        tcg_out_arith(s, INSN_EOR, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_EOR, ext, a0, a1, a2);
         break;
 
     case INDEX_op_mul_i64:
@@ -1206,7 +1204,7 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_shl(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_LSLV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_LSLV, ext, a0, a1, a2);
         }
         break;
 
@@ -1215,7 +1213,7 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_shr(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_LSRV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_LSRV, ext, a0, a1, a2);
         }
         break;
 
@@ -1224,7 +1222,7 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_sar(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_ASRV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_ASRV, ext, a0, a1, a2);
         }
         break;
 
@@ -1233,7 +1231,7 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_rotr(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, a2);
         }
         break;
 
@@ -1242,8 +1240,8 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_rotl(s, ext, a0, a1, a2);
         } else {
-            tcg_out_arith(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
-            tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
+            tcg_fmt_Rdnm(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
+            tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
         }
         break;