
[AArch64,3/5] Improve MOVI handling (Don't update RTX operand in-place)

Message ID 000801ce6077$e32a7bb0$a97f7310$@bolton@arm.com
State: New

Commit Message

Ian Bolton June 3, 2013, 4:32 p.m. UTC
(This patch is the third of five: the first four do some clean-up and
the last fixes a bug with scalar MOVI.  The bug fix without the clean-up
was particularly ugly!)


This one is focused on cleaning up aarch64_simd_valid_immediate, with
better use of arguments and no in-place modification of RTX operands.

Specifically, I've replaced the set of pointers that were passed in
with a single struct, and the caller now prints the immediate value
directly instead of letting operand[1] get fudged.
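
To make the shape of the change easier to see outside the full diff,
here is a small standalone C sketch of the pattern, assuming plain
integers in place of rtx values: the struct fields mirror the
simd_immediate_info added below, while valid_immediate and
output_mov_immediate are hypothetical stand-ins for
aarch64_simd_valid_immediate and aarch64_output_simd_mov_immediate.

/* Standalone illustration only -- not GCC code.  A single info struct
   replaces the five out-pointers, and the caller formats the immediate
   straight into the instruction template instead of rewriting
   operands[1] in place.  */
#include <stdbool.h>
#include <stdio.h>

struct simd_immediate_info {
  unsigned long long value;	/* stands in for the rtx value field */
  int shift;
  int element_width;
  unsigned char element_char;
  bool mvn;
};

/* Toy validator: pretends every input is an 8-bit constant shifted
   left by 8, and fills in INFO (if non-NULL) rather than writing
   through separate out-pointers.  */
static bool
valid_immediate (unsigned long long op, struct simd_immediate_info *info)
{
  if (info)
    {
      info->value = (op >> 8) & 0xff;
      info->shift = 8;
      info->element_width = 32;
      info->element_char = 's';
      info->mvn = false;
    }
  return true;
}

/* Caller prints the immediate directly into the template.  */
static const char *
output_mov_immediate (unsigned long long op, unsigned width)
{
  static char templ[40];
  struct simd_immediate_info info;

  if (!valid_immediate (op, &info))
    return NULL;

  unsigned lane_count = width / info.element_width;
  const char *mnemonic = info.mvn ? "mvni" : "movi";

  if (info.shift)
    snprintf (templ, sizeof (templ), "%s\tv0.%u%c, 0x%llx, lsl %d",
	      mnemonic, lane_count, info.element_char, info.value,
	      info.shift);
  else
    snprintf (templ, sizeof (templ), "%s\tv0.%u%c, 0x%llx",
	      mnemonic, lane_count, info.element_char, info.value);
  return templ;
}

int
main (void)
{
  /* Prints: movi	v0.4s, 0xab, lsl 8  */
  puts (output_mov_immediate (0xab00, 128));
  return 0;
}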


OK for trunk?


Cheers,
Ian


2013-06-03  Ian Bolton  <ian.bolton@arm.com>

        * config/aarch64/aarch64.c (simd_immediate_info): Struct to hold
        information computed by aarch64_simd_valid_immediate.
        (aarch64_legitimate_constant_p): Update arguments.
        (aarch64_simd_valid_immediate): Work with struct rather than many
        pointers.
        (aarch64_simd_scalar_immediate_valid_for_move): Update arguments.
        (aarch64_simd_make_constant): Update arguments.
        (aarch64_output_simd_mov_immediate): Work with struct rather than
        many pointers.  Output immediate directly rather than as operand.
        * config/aarch64/aarch64-protos.h (aarch64_simd_valid_immediate):
        Update prototype.
        * config/aarch64/constraints.md (Dn): Update arguments.

Patch

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index d1de14e..083ce91 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -156,8 +156,8 @@  bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
 bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
 bool aarch64_simd_scalar_immediate_valid_for_move (rtx, enum machine_mode);
 bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
-bool aarch64_simd_valid_immediate (rtx, enum machine_mode, int, rtx *,
-				   int *, unsigned char *, int *, int *);
+bool aarch64_simd_valid_immediate (rtx, enum machine_mode, bool,
+				   struct simd_immediate_info *);
 bool aarch64_symbolic_address_p (rtx);
 bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
 				  enum aarch64_symbol_type *);
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 5f97efe..d83e645 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -87,6 +87,14 @@  struct aarch64_address_info {
   enum aarch64_symbol_type symbol_type;
 };
 
+struct simd_immediate_info {
+  rtx value;
+  int shift;
+  int element_width;
+  unsigned char element_char;
+  bool mvn;
+};
+
 /* The current code model.  */
 enum aarch64_code_model aarch64_cmodel;
 
@@ -5150,8 +5158,7 @@  aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
   /* This could probably go away because
      we now decompose CONST_INTs according to expand_mov_immediate.  */
   if ((GET_CODE (x) == CONST_VECTOR
-       && aarch64_simd_valid_immediate (x, mode, false,
-					NULL, NULL, NULL, NULL, NULL))
+       && aarch64_simd_valid_immediate (x, mode, false, NULL))
       || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
 	return !targetm.cannot_force_const_mem (mode, x);
 
@@ -6144,10 +6151,8 @@  aarch64_vect_float_const_representable_p (rtx x)
 
 /* Return true for valid and false for invalid.  */
 bool
-aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
-			      rtx *modconst, int *elementwidth,
-			      unsigned char *elementchar,
-			      int *mvn, int *shift)
+aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
+			      struct simd_immediate_info *info)
 {
 #define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG)	\
   matches = 1;						\
@@ -6181,17 +6186,14 @@  aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
 	    || aarch64_vect_float_const_representable_p (op)))
 	return false;
 
-      if (modconst)
-	*modconst = CONST_VECTOR_ELT (op, 0);
-
-      if (elementwidth)
-	*elementwidth = elem_width;
-
-      if (elementchar)
-	*elementchar = sizetochar (elem_width);
-
-      if (shift)
-	*shift = 0;
+      if (info)
+	{
+	  info->value = CONST_VECTOR_ELT (op, 0);
+	  info->element_width = elem_width;
+	  info->element_char = sizetochar (elem_width);
+	  info->mvn = false;
+	  info->shift = 0;
+	}
 
       return true;
     }
@@ -6293,21 +6295,13 @@  aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
       || immtype == 18)
     return false;
 
-
-  if (elementwidth)
-    *elementwidth = elsize;
-
-  if (elementchar)
-    *elementchar = elchar;
-
-  if (mvn)
-    *mvn = emvn;
-
-  if (shift)
-    *shift = eshift;
-
-  if (modconst)
+  if (info)
     {
+      info->element_width = elsize;
+      info->element_char = elchar;
+      info->mvn = emvn != 0;
+      info->shift = eshift;
+
       unsigned HOST_WIDE_INT imm = 0;
 
       /* Un-invert bytes of recognized vector, if necessary.  */
@@ -6324,26 +6318,23 @@  aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
             imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
 	      << (i * BITS_PER_UNIT);
 
-          *modconst = GEN_INT (imm);
-        }
+	  info->value = GEN_INT (imm);
+	}
       else
-        {
-          unsigned HOST_WIDE_INT imm = 0;
-
-          for (i = 0; i < elsize / BITS_PER_UNIT; i++)
-            imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
+	{
+	  for (i = 0; i < elsize / BITS_PER_UNIT; i++)
+	    imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
 
 	  /* Construct 'abcdefgh' because the assembler cannot handle
-	     generic constants.  */
-	  gcc_assert (shift != NULL && mvn != NULL);
-	  if (*mvn)
+	     generic constants.	 */
+	  if (info->mvn)
 	    imm = ~imm;
-	  imm = (imm >> *shift) & 0xff;
-          *modconst = GEN_INT (imm);
+	  imm = (imm >> info->shift) & 0xff;
+	  info->value = GEN_INT (imm);
         }
     }
 
-  return immtype >= 0;
+  return true;
 #undef CHECK
 }
 
@@ -6451,8 +6442,7 @@  aarch64_simd_scalar_immediate_valid_for_move (rtx op, enum machine_mode mode)
   gcc_assert (!VECTOR_MODE_P (mode));
   vmode = aarch64_preferred_simd_mode (mode);
   rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
-  return aarch64_simd_valid_immediate (op_v, vmode, 0, NULL,
-				       NULL, NULL, NULL, NULL);
+  return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
 }
 
 /* Construct and return a PARALLEL RTX vector.  */
@@ -6680,8 +6670,7 @@  aarch64_simd_make_constant (rtx vals)
     gcc_unreachable ();
 
   if (const_vec != NULL_RTX
-      && aarch64_simd_valid_immediate (const_vec, mode, 0, NULL,
-				       NULL, NULL, NULL, NULL))
+      && aarch64_simd_valid_immediate (const_vec, mode, false, NULL))
     /* Load using MOVI/MVNI.  */
     return const_vec;
   else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
@@ -7244,45 +7233,57 @@  aarch64_output_simd_mov_immediate (rtx *const_vector,
 				   unsigned width)
 {
   bool is_valid;
-  unsigned char widthc;
-  int lane_width_bits;
   static char templ[40];
-  int shift = 0, mvn = 0;
   const char *mnemonic;
   unsigned int lane_count = 0;
 
-/* This will return true to show const_vector is legal for use as either
-   a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate.  It
-   writes back various values via the int pointers and it modifies the
-   operand pointed to by CONST_VECTOR in-place, if required.  */
-  is_valid =
-    aarch64_simd_valid_immediate (*const_vector, mode, 0,
-				  const_vector, &lane_width_bits,
-				  &widthc, &mvn, &shift);
+  struct simd_immediate_info info;
+
+  /* This will return true to show const_vector is legal for use as either
+     a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate.  It will
+     also update INFO to show how the immediate should be generated.  */
+  is_valid = aarch64_simd_valid_immediate (*const_vector, mode, false, &info);
   gcc_assert (is_valid);
 
+  gcc_assert (info.element_width != 0);
+  lane_count = width / info.element_width;
+
   mode = GET_MODE_INNER (mode);
   if (mode == SFmode || mode == DFmode)
     {
-      bool zero_p =
-	aarch64_float_const_zero_rtx_p (*const_vector);
-      gcc_assert (shift == 0);
-      mnemonic = zero_p ? "movi" : "fmov";
+      gcc_assert (info.shift == 0 && ! info.mvn);
+      if (aarch64_float_const_zero_rtx_p (info.value))
+        info.value = GEN_INT (0);
+      else
+	{
+#define buf_size 20
+	  REAL_VALUE_TYPE r;
+	  REAL_VALUE_FROM_CONST_DOUBLE (r, info.value);
+	  char float_buf[buf_size] = {'\0'};
+	  real_to_decimal_for_mode (float_buf, &r, buf_size, buf_size, 1, mode);
+#undef buf_size
+
+	  if (lane_count == 1)
+	    snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
+	  else
+	    snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
+		      lane_count, info.element_char, float_buf);
+	  return templ;
+	}
     }
-  else
-    mnemonic = mvn ? "mvni" : "movi";
 
-  gcc_assert (lane_width_bits != 0);
-  lane_count = width / lane_width_bits;
+  mnemonic = info.mvn ? "mvni" : "movi";
 
   if (lane_count == 1)
-    snprintf (templ, sizeof (templ), "%s\t%%d0, %%1", mnemonic);
-  else if (shift)
-    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1, lsl %d",
-	      mnemonic, lane_count, widthc, shift);
+    snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX,
+	      mnemonic, UINTVAL (info.value));
+  else if (info.shift)
+    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
+	      ", lsl %d", mnemonic, lane_count, info.element_char,
+	      UINTVAL (info.value), info.shift);
   else
-    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1",
-	      mnemonic, lane_count, widthc);
+    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
+	      mnemonic, lane_count, info.element_char, UINTVAL (info.value));
   return templ;
 }
 
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index d195425..7cafc08 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -143,9 +143,8 @@ 
   "@internal
  A constraint that matches vector of immediates."
  (and (match_code "const_vector")
-      (match_test "aarch64_simd_valid_immediate (op, GET_MODE (op), 0,
-						 NULL, NULL, NULL,
-						 NULL, NULL)")))
+      (match_test "aarch64_simd_valid_immediate (op, GET_MODE (op),
+						 false, NULL)")))
 
 (define_constraint "Dh"
   "@internal