Patchwork [08/13] Add VSX Vector Move Instructions

login
register
mail settings
Submitter Tom Musta
Date Oct. 4, 2013, 1:21 p.m.
Message ID <524EC0E9.4040402@gmail.com>
Download mbox | patch
Permalink /patch/280623/
State New
Headers show

Comments

Tom Musta - Oct. 4, 2013, 1:21 p.m.
This patch adds the vector move instructions:

   - xvabsdp - Vector Absolute Value Double-Precision
   - xvnabsdp - Vector Negative Absolute Value Double-Precision
   - xvnegdp - Vector Negate Double-Precision
   - xvcpsgndp - Vector Copy Sign Double-Precision
   - xvabssp - Vector Absolute Value Single-Precision
   - xvnabssp - Vector Negative Absolute Value Single-Precision
   - xvnegsp - Vector Negate Single-Precision
   - xvcpsgnsp - Vector Copy Sign Single-Precision

Signed-off-by: Tom Musta <tommusta@gmail.com>
---
  target-ppc/translate.c |   68 
++++++++++++++++++++++++++++++++++++++++++++++++
  1 file changed, 68 insertions(+), 0 deletions(-)

  /* Register moves */
@@ -9702,6 +9762,14 @@ GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
  GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
  GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),

+GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
  GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),

  #undef GEN_SPE

Patch

diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index db54e4f..d03675c 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7207,6 +7207,66 @@  VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
  VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
  VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

+/*
+ * Generate a VSX vector sign-manipulation instruction (abs, nabs,
+ * neg, cpsgn).  Each 128-bit VSX register is modelled as a high/low
+ * pair of 64-bit registers, so sgn_mask is applied to both
+ * doublewords.  "op" is a compile-time constant, so the switch
+ * collapses to a single case in each expansion.
+ */
+#define VSX_VECTOR_MOVE(name, op, sgn_mask)                      \
+static void glue(gen_, name)(DisasContext * ctx)                 \
+    {                                                            \
+        TCGv_i64 xbh, xbl;                                       \
+        if (unlikely(!ctx->vsx_enabled)) {                       \
+            gen_exception(ctx, POWERPC_EXCP_VSXU);               \
+            return;                                              \
+        }                                                        \
+        /* 64-bit temps: use the _i64 allocator, not             \
+           tcg_temp_new(), which is target_ulong sized. */       \
+        xbh = tcg_temp_new_i64();                                \
+        xbl = tcg_temp_new_i64();                                \
+        tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode)));         \
+        tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode)));         \
+        switch (op) {                                            \
+            case OP_ABS: {                                       \
+                tcg_gen_andi_i64(xbh, xbh, ~(sgn_mask));         \
+                tcg_gen_andi_i64(xbl, xbl, ~(sgn_mask));         \
+                break;                                           \
+            }                                                    \
+            case OP_NABS: {                                      \
+                tcg_gen_ori_i64(xbh, xbh, (sgn_mask));           \
+                tcg_gen_ori_i64(xbl, xbl, (sgn_mask));           \
+                break;                                           \
+            }                                                    \
+            case OP_NEG: {                                       \
+                tcg_gen_xori_i64(xbh, xbh, (sgn_mask));          \
+                tcg_gen_xori_i64(xbl, xbl, (sgn_mask));          \
+                break;                                           \
+            }                                                    \
+            case OP_CPSGN: {                                     \
+                TCGv_i64 xah = tcg_temp_new_i64();               \
+                TCGv_i64 xal = tcg_temp_new_i64();               \
+                tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
+                tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
+                tcg_gen_andi_i64(xah, xah, (sgn_mask));          \
+                tcg_gen_andi_i64(xal, xal, (sgn_mask));          \
+                tcg_gen_andi_i64(xbh, xbh, ~(sgn_mask));         \
+                tcg_gen_andi_i64(xbl, xbl, ~(sgn_mask));         \
+                tcg_gen_or_i64(xbh, xbh, xah);                   \
+                tcg_gen_or_i64(xbl, xbl, xal);                   \
+                tcg_temp_free_i64(xah);                          \
+                tcg_temp_free_i64(xal);                          \
+                break;                                           \
+            }                                                    \
+        }                                                        \
+        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh);         \
+        tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl);         \
+        tcg_temp_free_i64(xbh);                                  \
+        tcg_temp_free_i64(xbl);                                  \
+    }
+
+/* Instantiate the eight VSX vector move instructions. */
+VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+
+

  /***                           SPE 
extension                               ***/