
[AArch64] Handle arguments and return types with partial SVE modes

Message ID mptwoasphtf.fsf@arm.com
State New
Series [AArch64] Handle arguments and return types with partial SVE modes

Commit Message

Richard Sandiford Dec. 19, 2019, 1:26 p.m. UTC
Partial SVE modes can be picked up and used by the vector_size(N)
attribute.[*] This means that we need to cope with arguments and return
values with partial SVE modes, which previously triggered asserts like:

  /* Generic vectors that map to SVE modes with -msve-vector-bits=N are
     passed by reference, not by value.  */
  gcc_assert (!aarch64_sve_mode_p (mode));

The ABI for these types is fixed from pre-SVE days, and must in any case
be the same for all -msve-vector-bits=N values.  All we need to do is
ensure that the vectors are passed and returned in the traditional way.

[*] Advanced SIMD always wins for 64-bit and 128-bit vectors though.
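
As a concrete illustration (a minimal reproducer in the spirit of the
new test below; the exact options are just an example):

  /* Compile with e.g. -O -msve-vector-bits=256.  */
  typedef unsigned char int8x4_t __attribute__((vector_size (4)));

  /* The type now has a partial SVE mode rather than an integer or
     Advanced SIMD mode, but X1 must still arrive in w1 and the result
     must still be returned in w0, just as before SVE existed.  */
  int8x4_t passthru (int8x4_t x0, int8x4_t x1) { return x1; }

Before the patch, functions like this tripped the assert quoted above;
with the patch they are passed and returned in general registers as before.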

Tested on aarch64-linux-gnu, applied as r279571.

Richard


2019-12-19  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64.c (aarch64_function_value_1): New function,
	split out from...
	(aarch64_function_value): ...here.  Handle partial SVE modes by
	pretending that they have the associated/traditional integer mode,
	then wrap the result in the real mode.
	(aarch64_layout_arg): Take an orig_mode argument and pass it to
	aarch64_function_arg_alignment.  Handle partial SVE modes analogously
	to aarch64_function_value.
	(aarch64_function_arg): Update call accordingly.
	(aarch64_function_arg_advance): Likewise.

gcc/testsuite/
	* gcc.target/aarch64/sve/pcs/gnu_vectors_3.c: New test.
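
The "wrap the result in the real mode" step in the ChangeLog amounts to
handing back a one-element PARALLEL that says "the value lives in this
integer-mode register, starting at byte offset 0".  A minimal sketch of
the argument-passing case, using the same rtl constructors as the patch
below (REG is the register picked by the pre-SVE rules, MODE the
original partial SVE mode):

  rtx pair = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
  pcum->aapcs_reg = gen_rtx_PARALLEL (mode, gen_rtvec (1, pair));

The return-value path is the same except that the enclosing PARALLEL is
given VOIDmode.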

Patch

Index: gcc/config/aarch64/aarch64.c
===================================================================
--- gcc/config/aarch64/aarch64.c	2019-12-13 10:21:19.000000000 +0000
+++ gcc/config/aarch64/aarch64.c	2019-12-19 13:24:47.977362907 +0000
@@ -4948,22 +4948,12 @@  aarch64_return_in_msb (const_tree valtyp
   return true;
 }
 
-/* Implement TARGET_FUNCTION_VALUE.
-   Define how to find the value returned by a function.  */
-
+/* Subroutine of aarch64_function_value.  MODE is the mode of the argument
+   after promotion, and after partial SVE types have been replaced by
+   their integer equivalents.  */
 static rtx
-aarch64_function_value (const_tree type, const_tree func,
-			bool outgoing ATTRIBUTE_UNUSED)
+aarch64_function_value_1 (const_tree type, machine_mode mode)
 {
-  machine_mode mode;
-  int unsignedp;
-  int count;
-  machine_mode ag_mode;
-
-  mode = TYPE_MODE (type);
-  if (INTEGRAL_TYPE_P (type))
-    mode = promote_function_mode (type, mode, &unsignedp, func, 1);
-
   unsigned int num_zr, num_pr;
   if (type && aarch64_sve_argument_p (type, &num_zr, &num_pr))
     {
@@ -4998,6 +4988,8 @@  aarch64_function_value (const_tree type,
 	}
     }
 
+  int count;
+  machine_mode ag_mode;
   if (aarch64_vfp_is_call_or_return_candidate (mode, type,
 					       &ag_mode, &count, NULL))
     {
@@ -5026,6 +5018,42 @@  aarch64_function_value (const_tree type,
     return gen_rtx_REG (mode, R0_REGNUM);
 }
 
+/* Implement TARGET_FUNCTION_VALUE.
+   Define how to find the value returned by a function.  */
+
+static rtx
+aarch64_function_value (const_tree type, const_tree func,
+			bool outgoing ATTRIBUTE_UNUSED)
+{
+  machine_mode mode;
+  int unsignedp;
+
+  mode = TYPE_MODE (type);
+  if (INTEGRAL_TYPE_P (type))
+    mode = promote_function_mode (type, mode, &unsignedp, func, 1);
+
+  /* Vector types can acquire a partial SVE mode using things like
+     __attribute__((vector_size(N))), and this is potentially useful.
+     However, the choice of mode doesn't affect the type's ABI identity,
+     so we should treat the types as though they had the associated
+     integer mode, just like they did before SVE was introduced.
+
+     We know that the vector must be 128 bits or smaller, otherwise we'd
+     have returned it in memory instead.  */
+  unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+  if ((vec_flags & VEC_ANY_SVE) && (vec_flags & VEC_PARTIAL))
+    {
+      scalar_int_mode int_mode = int_mode_for_mode (mode).require ();
+      rtx reg = aarch64_function_value_1 (type, int_mode);
+      /* Vector types are never returned in the MSB and are never split.  */
+      gcc_assert (REG_P (reg) && GET_MODE (reg) == int_mode);
+      rtx pair = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
+      return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, pair));
+    }
+
+  return aarch64_function_value_1 (type, mode);
+}
+
 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.
    Return true if REGNO is the number of a hard register in which the values
    of called function may come back.  */
@@ -5151,10 +5179,14 @@  aarch64_function_arg_alignment (machine_
 }
 
 /* Layout a function argument according to the AAPCS64 rules.  The rule
-   numbers refer to the rule numbers in the AAPCS64.  */
+   numbers refer to the rule numbers in the AAPCS64.  ORIG_MODE is the
+   mode that was originally given to us by the target hook, whereas the
+   mode in ARG might be the result of replacing partial SVE modes with
+   the equivalent integer mode.  */
 
 static void
-aarch64_layout_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
+aarch64_layout_arg (cumulative_args_t pcum_v, const function_arg_info &arg,
+		    machine_mode orig_mode)
 {
   CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
   tree type = arg.type;
@@ -5168,6 +5200,29 @@  aarch64_layout_arg (cumulative_args_t pc
   if (pcum->aapcs_arg_processed)
     return;
 
+  /* Vector types can acquire a partial SVE mode using things like
+     __attribute__((vector_size(N))), and this is potentially useful.
+     However, the choice of mode doesn't affect the type's ABI identity,
+     so we should treat the types as though they had the associated
+     integer mode, just like they did before SVE was introduced.
+
+     We know that the vector must be 128 bits or smaller, otherwise we'd
+     have passed it by reference instead.  */
+  unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+  if ((vec_flags & VEC_ANY_SVE) && (vec_flags & VEC_PARTIAL))
+    {
+      function_arg_info tmp_arg = arg;
+      tmp_arg.mode = int_mode_for_mode (mode).require ();
+      aarch64_layout_arg (pcum_v, tmp_arg, orig_mode);
+      if (rtx reg = pcum->aapcs_reg)
+	{
+	  gcc_assert (REG_P (reg) && GET_MODE (reg) == tmp_arg.mode);
+	  rtx pair = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
+	  pcum->aapcs_reg = gen_rtx_PARALLEL (mode, gen_rtvec (1, pair));
+	}
+      return;
+    }
+
   pcum->aapcs_arg_processed = true;
 
   unsigned int num_zr, num_pr;
@@ -5289,7 +5344,7 @@  aarch64_layout_arg (cumulative_args_t pc
 	     comparison is there because for > 16 * BITS_PER_UNIT
 	     alignment nregs should be > 2 and therefore it should be
 	     passed by reference rather than value.  */
-	  && (aarch64_function_arg_alignment (mode, type, &abi_break)
+	  && (aarch64_function_arg_alignment (orig_mode, type, &abi_break)
 	      == 16 * BITS_PER_UNIT))
 	{
 	  if (abi_break && warn_psabi && currently_expanding_gimple_stmt)
@@ -5332,7 +5387,7 @@  aarch64_layout_arg (cumulative_args_t pc
 on_stack:
   pcum->aapcs_stack_words = size / UNITS_PER_WORD;
 
-  if (aarch64_function_arg_alignment (mode, type, &abi_break)
+  if (aarch64_function_arg_alignment (orig_mode, type, &abi_break)
       == 16 * BITS_PER_UNIT)
     {
       int new_size = ROUND_UP (pcum->aapcs_stack_size, 16 / UNITS_PER_WORD);
@@ -5360,7 +5415,7 @@  aarch64_function_arg (cumulative_args_t
   if (arg.end_marker_p ())
     return gen_int_mode (pcum->pcs_variant, DImode);
 
-  aarch64_layout_arg (pcum_v, arg);
+  aarch64_layout_arg (pcum_v, arg, arg.mode);
   return pcum->aapcs_reg;
 }
 
@@ -5425,7 +5480,7 @@  aarch64_function_arg_advance (cumulative
       || pcum->pcs_variant == ARM_PCS_SIMD
       || pcum->pcs_variant == ARM_PCS_SVE)
     {
-      aarch64_layout_arg (pcum_v, arg);
+      aarch64_layout_arg (pcum_v, arg, arg.mode);
       gcc_assert ((pcum->aapcs_reg != NULL_RTX)
 		  != (pcum->aapcs_stack_words != 0));
       pcum->aapcs_arg_processed = false;
Index: gcc/testsuite/gcc.target/aarch64/sve/pcs/gnu_vectors_3.c
===================================================================
--- /dev/null	2019-09-17 11:41:18.176664108 +0100
+++ gcc/testsuite/gcc.target/aarch64/sve/pcs/gnu_vectors_3.c	2019-12-19 13:24:47.997362774 +0000
@@ -0,0 +1,58 @@ 
+/* { dg-options "-O -msve-vector-bits=256" } */
+
+typedef unsigned char int8x4_t __attribute__((vector_size (4)));
+
+/*
+** passthru_x0:
+**	ret
+*/
+int8x4_t passthru_x0 (int8x4_t x0) { return x0; }
+
+/*
+** passthru_x1:
+**	mov	w0, w1
+**	ret
+*/
+int8x4_t passthru_x1 (int8x4_t x0, int8x4_t x1) { return x1; }
+
+int8x4_t load (int8x4_t *x0) { return *x0; }
+
+void store (int8x4_t *x0, int8x4_t x1) { *x0 = x1; }
+
+/*
+** stack_callee:
+**	ptrue	(p[0-7])\.b, vl32
+**	ld1b	(z[0-9]+\.d), \1/z, \[sp\]
+**	st1b	\2, \1, \[x0\]
+**	ret
+*/
+__attribute__((noipa))
+void stack_callee (int8x4_t *x0, int8x4_t x1, int8x4_t x2, int8x4_t x3,
+		   int8x4_t x4, int8x4_t x5, int8x4_t x6, int8x4_t x7,
+		   int8x4_t stack0)
+{
+  *x0 = stack0;
+}
+
+/*
+** stack_caller:
+**	...
+**	ptrue	(p[0-7])\.b, vl32
+**	...
+**	ld1b	(z[0-9]+\.d), \1/z, \[x0\]
+**	...
+**	st1b	\2, \1, \[sp\]
+**	...
+**	ret
+*/
+void stack_caller (int8x4_t *x0, int8x4_t x1)
+{
+  stack_callee (x0, x1, x1, x1, x1, x1, x1, x1, *x0);
+}
+
+/* { dg-final { scan-assembler {\tmov\tw2, w} } } */
+/* { dg-final { scan-assembler {\tmov\tw3, w} } } */
+/* { dg-final { scan-assembler {\tmov\tw4, w} } } */
+/* { dg-final { scan-assembler {\tmov\tw5, w} } } */
+/* { dg-final { scan-assembler {\tmov\tw6, w} } } */
+/* { dg-final { scan-assembler {\tmov\tw7, w} } } */