[v2,ARM,6x]: MVE ACLE vaddq intrinsics using arithmetic plus operator.

Message ID AM0PR08MB53806CC9C87DD48AAF24EDD69BF40@AM0PR08MB5380.eurprd08.prod.outlook.com
State New
Series [v2,ARM,6x]: MVE ACLE vaddq intrinsics using arithmetic plus operator.

Commit Message

Srinath Parvathaneni March 19, 2020, 5:59 p.m. UTC
Hello Kyrill,

This patch addresses all the comments in patch version v2.
(version v2) https://gcc.gnu.org/pipermail/gcc-patches/2019-November/534349.html

####

Hello,

This patch supports the following MVE ACLE vaddq intrinsics. The RTL patterns for
these intrinsics are added using the arithmetic "plus" operator.

vaddq_s8, vaddq_s16, vaddq_s32, vaddq_u8, vaddq_u16, vaddq_u32, vaddq_f16, vaddq_f32.

Please refer to the M-profile Vector Extension (MVE) intrinsics [1] for more details.
[1]  https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
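
For illustration, here is a minimal usage sketch mirroring the new tests (built
with an MVE-enabled toolchain, e.g. -march=armv8.1-m.main+mve.fp
-mfloat-abi=hard); each intrinsic reduces to a plain vector "+":

#include "arm_mve.h"

int32x4_t
add_typed (int32x4_t a, int32x4_t b)
{
  return vaddq_s32 (a, b);    /* implemented as a + b, emitted as vadd.i32 */
}

float16x8_t
add_poly (float16x8_t a, float16x8_t b)
{
  return vaddq (a, b);        /* polymorphic variant, resolves to vaddq_f16 */
}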

Regression tested on arm-none-eabi and found no regressions.

Ok for trunk?

Thanks,
Srinath.

gcc/ChangeLog:

2020-03-19  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
	    Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>

	* config/arm/arm_mve.h (vaddq_s8): Define macro.
	(vaddq_s16): Likewise.
	(vaddq_s32): Likewise.
	(vaddq_u8): Likewise.
	(vaddq_u16): Likewise.
	(vaddq_u32): Likewise.
	(vaddq_f16): Likewise.
	(vaddq_f32): Likewise.
	(__arm_vaddq_s8): Define intrinsic.
	(__arm_vaddq_s16): Likewise.
	(__arm_vaddq_s32): Likewise.
	(__arm_vaddq_u8): Likewise.
	(__arm_vaddq_u16): Likewise.
	(__arm_vaddq_u32): Likewise.
	(__arm_vaddq_f16): Likewise.
	(__arm_vaddq_f32): Likewise.
	(vaddq): Define polymorphic variant.
	* config/arm/iterators.md (VNIM): Define mode iterator for modes
	common to Neon, IWMMXT and MVE.
	(VNINOTM): Likewise.
	* config/arm/mve.md (mve_vaddq<mode>): Define RTL pattern.
	(mve_vaddq_f<mode>): Define RTL pattern.
	* config/arm/neon.md (add<mode>3): Rename to addv4hf3 RTL pattern.
	(addv8hf3_neon): Define RTL pattern.
	* config/arm/vec-common.md (add<mode>3): Modify standard add RTL pattern
	to support MVE.
	(addv8hf3): Define standard RTL pattern for MVE and Neon.
	(add<mode>3): Modify existing standard add RTL pattern for Neon and IWMMXT.

gcc/testsuite/ChangeLog:

2020-03-19  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
	    Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>

	* gcc.target/arm/mve/intrinsics/vaddq_f16.c: New test.
	* gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise.
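
On the "(vaddq): Define polymorphic variant" entry above: the
int (*)[__ARM_mve_type_...][__ARM_mve_type_...] lines added to arm_mve.h
below are C11 _Generic association types, so vaddq selects an overload from
the typeids of both arguments at once. A minimal standalone sketch of the
same dispatch trick (all names below are illustrative, not the header's):

#include <stdio.h>

typedef struct { int v[4]; }   my_i32x4;   /* stand-ins for int32x4_t etc. */
typedef struct { float v[4]; } my_f32x4;

/* Typeids start at 1: they are used as array dimensions below, which must
   be positive.  */
enum { my_type_i32x4 = 1, my_type_f32x4 };

#define my_typeid(x) _Generic ((x), my_i32x4: my_type_i32x4, \
                                    my_f32x4: my_type_f32x4)

static my_i32x4 my_vaddq_i32 (my_i32x4 a, my_i32x4 b)
{ for (int i = 0; i < 4; i++) a.v[i] += b.v[i]; return a; }

static my_f32x4 my_vaddq_f32 (my_f32x4 a, my_f32x4 b)
{ for (int i = 0; i < 4; i++) a.v[i] += b.v[i]; return a; }

/* Encode BOTH argument typeids in one type by casting a null pointer to a
   pointer-to-2-D-array whose dimensions are the typeids, then match on it.
   Mixed argument types have no association and fail to compile, which is
   the role __ARM_undef plays in the real header.  */
#define my_vaddq(a, b) \
  _Generic ((int (*)[my_typeid (a)][my_typeid (b)]) 0, \
            int (*)[my_type_i32x4][my_type_i32x4]: my_vaddq_i32, \
            int (*)[my_type_f32x4][my_type_f32x4]: my_vaddq_f32) (a, b)

int main (void)
{
  my_i32x4 x = { { 1, 2, 3, 4 } }, y = { { 4, 3, 2, 1 } };
  my_i32x4 z = my_vaddq (x, y);          /* resolves to my_vaddq_i32 */
  printf ("%d %d %d %d\n", z.v[0], z.v[1], z.v[2], z.v[3]);
  return 0;
}

The real header additionally wraps each argument in __ARM_mve_coerce so the
selected function sees exactly the matched vector type.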


###############     Attachment also inlined for ease of reply    ###############
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 5ea42bd6a5bd98d5c77a0e7da3464ba6b431770b..55c256910bb7f4c616ea592be699f7f4fc3f17f7 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -1898,6 +1898,14 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
 #define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
 #define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
 #define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
+#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
+#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
+#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
+#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
+#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
+#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
+#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
+#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
 #endif
 
 __extension__ extern __inline void
@@ -12341,6 +12349,48 @@ __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset,
   __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
 }
 
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a + __b;
+}
+
 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
 
 __extension__ extern __inline void
@@ -14707,6 +14757,20 @@ __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offs
   __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset, __value, __p);
 }
 
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __a + __b;
+}
+
 #endif
 
 enum {
@@ -15186,6 +15250,8 @@ extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(p0, float16x8_t), __ARM_mve_coerce(p1, float16x8_t)), \
+  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(p0, float32x4_t), __ARM_mve_coerce(p1, float32x4_t)), \
   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 5c1a11bf7dee7590d668e7ec5e3b068789b3b3db..f3cbc0d03564ef8866226f836a27ed6051353f5d 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -66,6 +66,14 @@
 ;; Integer and float modes supported by Neon and IWMMXT.
 (define_mode_iterator VALL [V2DI V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
 
+;; Integer and float modes supported by Neon, IWMMXT and MVE, used by
+;; arithmetic expand patterns.
+(define_mode_iterator VNIM [V16QI V8HI V4SI V4SF])
+
+;; Integer and float modes supported by Neon and IWMMXT but not MVE, used by
+;; arithmetic expand patterns.
+(define_mode_iterator VNINOTM [V2SI V4HI V8QI V2SF V2DI])
+
 ;; Integer and float modes supported by Neon, IWMMXT and MVE.
 (define_mode_iterator VNIM1 [V16QI V8HI V4SI V4SF V2DI])
 
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index 5667882e941bac30d5e89b0ff866948d06bd3d5a..7578b8070282a3633d1e6f5fde5ba855ff8e553c 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -9643,3 +9643,31 @@
    return "";
 }
   [(set_attr "length" "4")])
+
+;;
+;; [vaddq_s, vaddq_u])
+;;
+(define_insn "mve_vaddq<mode>"
+  [
+   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+	(plus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+		    (match_operand:MVE_2 2 "s_register_operand" "w")))
+  ]
+  "TARGET_HAVE_MVE"
+  "vadd.i%#<V_sz_elem>  %q0, %q1, %q2"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vaddq_f])
+;;
+(define_insn "mve_vaddq_f<mode>"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+	(plus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
+		    (match_operand:MVE_0 2 "s_register_operand" "w")))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vadd.f%#<V_sz_elem> %q0, %q1, %q2"
+  [(set_attr "type" "mve_move")
+])
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index fbfeef233f38831a5cb256622625879d15209431..272e6c1e7cfc4c42065d1d50131ef49d89052d91 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -519,18 +519,30 @@
 ;; As with SFmode, full support for HFmode vector arithmetic is only available
 ;; when flag-unsafe-math-optimizations is enabled.
 
-(define_insn "add<mode>3"
+;; The add pattern for modes V8HF and V4HF is split into separate patterns to
+;; add support for the standard pattern addv8hf3 in MVE.  The following pattern
+;; is called from the "addv8hf3" standard pattern inside vec-common.md.
+
+(define_insn "addv8hf3_neon"
   [(set
-    (match_operand:VH 0 "s_register_operand" "=w")
-    (plus:VH
-     (match_operand:VH 1 "s_register_operand" "w")
-     (match_operand:VH 2 "s_register_operand" "w")))]
+    (match_operand:V8HF 0 "s_register_operand" "=w")
+    (plus:V8HF
+     (match_operand:V8HF 1 "s_register_operand" "w")
+     (match_operand:V8HF 2 "s_register_operand" "w")))]
  "TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
- "vadd.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
-   (if_then_else (match_test "<Is_float_mode>")
-    (const_string "neon_fp_addsub_s<q>")
-    (const_string "neon_add<q>")))]
+ "vadd.f16\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_addsub_s_q")]
+)
+
+(define_insn "addv4hf3"
+  [(set
+    (match_operand:V4HF 0 "s_register_operand" "=w")
+    (plus:V4HF
+     (match_operand:V4HF 1 "s_register_operand" "w")
+     (match_operand:V4HF 2 "s_register_operand" "w")))]
+ "TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
+ "vadd.f16\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_addsub_s_q")]
 )
 
 (define_insn "add<mode>3_fp16"
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 916e4914a6267f928c3d3229cb9907e6fb79b222..786daa628510a5def50530c5b459bece45a0007c 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -77,19 +77,51 @@
      }
 })
 
+;; Vector arithmetic.  Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon, IWMMXT and MVE.
+
+(define_expand "add<mode>3"
+  [(set (match_operand:VNIM 0 "s_register_operand")
+	(plus:VNIM (match_operand:VNIM 1 "s_register_operand")
+		   (match_operand:VNIM 2 "s_register_operand")))]
+  "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
+		    || flag_unsafe_math_optimizations))
+   || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))
+   || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE(<MODE>mode))
+   || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(<MODE>mode))"
+{
+})
+
+;; Vector arithmetic.  Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon and MVE.
+
+(define_expand "addv8hf3"
+  [(set (match_operand:V8HF 0 "s_register_operand")
+	(plus:V8HF (match_operand:V8HF 1 "s_register_operand")
+		   (match_operand:V8HF 2 "s_register_operand")))]
+  "(TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(V8HFmode))
+   || (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)"
+{
+  if (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+    emit_insn (gen_addv8hf3_neon (operands[0], operands[1], operands[2]));
+})
+
+;; Vector arithmetic.  Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon and IWMMXT.
+
+(define_expand "add<mode>3"
+  [(set (match_operand:VNINOTM 0 "s_register_operand")
+	(plus:VNINOTM (match_operand:VNINOTM 1 "s_register_operand")
+		      (match_operand:VNINOTM 2 "s_register_operand")))]
+  "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
+		    || flag_unsafe_math_optimizations))
+   || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
 ;; Vector arithmetic. Expanders are blank, then unnamed insns implement
 ;; patterns separately for IWMMXT and Neon.
 
-(define_expand "add<mode>3"
-  [(set (match_operand:VALL 0 "s_register_operand")
-        (plus:VALL (match_operand:VALL 1 "s_register_operand")
-                   (match_operand:VALL 2 "s_register_operand")))]
-  "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
-		    || flag_unsafe_math_optimizations))
-   || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
-{
-})
-
 (define_expand "sub<mode>3"
   [(set (match_operand:VALL 0 "s_register_operand")
         (minus:VALL (match_operand:VALL 1 "s_register_operand")
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
new file mode 100644
index 0000000000000000000000000000000000000000..53b84d59f85ca359df68e906fc4c1e3599698a2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b)
+{
+  return vaddq_f16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f16"  }  } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f16"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
new file mode 100644
index 0000000000000000000000000000000000000000..9bb7d1c0ecaf4c22303a2a89a41dd61c9fe6352e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b)
+{
+  return vaddq_f32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f32"  }  } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f32"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
new file mode 100644
index 0000000000000000000000000000000000000000..885473c9dfe6bf92e167cb64bd582b8f0f7b3a6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+  return vaddq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16"  }  } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
new file mode 100644
index 0000000000000000000000000000000000000000..90ea50198176334b73a459a8a5ae1fc6db558cb0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+  return vaddq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32"  }  } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
new file mode 100644
index 0000000000000000000000000000000000000000..dbde92affe54d33939208a81b5f5edd4502dd5bd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+  return vaddq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8"  }  } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
new file mode 100644
index 0000000000000000000000000000000000000000..bc966732cdd6481d5a4cef83cc4cea2b6e91e4f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+  return vaddq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16"  }  } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
new file mode 100644
index 0000000000000000000000000000000000000000..ed262c29406ab01f60f7e171b27af3ae3f5c2f93
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+  return vaddq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32"  }  } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
new file mode 100644
index 0000000000000000000000000000000000000000..b12e657b7af2f2ed947eb28a6d0e5dcdfde862b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+  return vaddq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8"  }  } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8"  }  } */

Comments

Kyrylo Tkachov March 20, 2020, 11:45 a.m. UTC | #1
Hi Srinath,

> -----Original Message-----
> From: Srinath Parvathaneni <Srinath.Parvathaneni@arm.com>
> Sent: 19 March 2020 17:59
> To: gcc-patches@gcc.gnu.org
> Cc: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>
> Subject: [PATCH v2][ARM][GCC][6x]:MVE ACLE vaddq intrinsics using
> arithmetic plus operator.
> 
> Hello Kyrill,
> 
> This patch addresses all the comments in patch version v2.
> (version v2) https://gcc.gnu.org/pipermail/gcc-patches/2019-
> November/534349.html
> 
> ####
> 
> Hello,
> 
> This patch supports following MVE ACLE vaddq intrinsics. The RTL patterns
> for this intrinsics are added using arithmetic "plus" operator.
> 
> vaddq_s8, vaddq_s16, vaddq_s32, vaddq_u8, vaddq_u16, vaddq_u32,
> vaddq_f16, vaddq_f32.
> 
> Please refer to M-profile Vector Extension (MVE) intrinsics [1]  for more
> details.
> [1]  https://developer.arm.com/architectures/instruction-sets/simd-
> isas/helium/mve-intrinsics
> 
> Regression tested on arm-none-eabi and found no regressions.
> 
> Ok for trunk?

Thanks, I've pushed this patch to master.
Kyrill
