
[v2,AArch64,6/6] Reimplement vpadd intrinsics & extend rtl patterns to all modes

Message ID 758393e5-3257-7aab-8704-592aaafc1917@foss.arm.com
State New

Commit Message

Jiong Wang June 6, 2016, 1:40 p.m. UTC
These intrinsics were implemented by inline assembly using the "faddp"
instruction.  There was a pattern "aarch64_addpv4sf" which supported the
V4SF mode only; we can extend this pattern to all VDQF modes and then
reimplement these intrinsics through builtins.
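
For reference, "faddp" adds adjacent pairs of elements across both source
operands.  A minimal sketch of the semantics these intrinsics expose
(function name illustrative, not part of the patch):

  #include <arm_neon.h>

  /* vpadd_f32 concatenates a and b, then adds adjacent pairs:
     result = { a[0] + a[1], b[0] + b[1] }.  */
  float32x2_t
  pairwise_sum (float32x2_t a, float32x2_t b)
  {
    return vpadd_f32 (a, b);	/* maps to a single "faddp" */
  }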

gcc/
2016-06-06  Jiong Wang  <jiong.wang@arm.com>

         * config/aarch64/aarch64-simd-builtins.def (faddp): New builtins
         for modes in VDQF.
         * config/aarch64/aarch64-simd.md (aarch64_faddp<mode>): New.
         (aarch64_addpv4sf): Delete.
         (reduc_plus_scal_v4sf): Use "gen_aarch64_faddpv4sf" instead of
         "gen_aarch64_addpv4sf".
         * config/aarch64/arm_neon.h (vpadd_f32): Remove inline assembly.  Use
         builtin.
         (vpadds_f32): Likewise.
         (vpaddq_f32): Likewise.
         (vpaddq_f64): Likewise.
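
The "reduc_plus_scal_v4sf" change above relies on the fact that two
successive pairwise adds reduce a four-lane vector to its total sum in
every lane, after which lane 0 is extracted.  A sketch of the equivalent
computation using the intrinsics this patch reimplements (function name
illustrative):

  #include <arm_neon.h>

  /* { a0, a1, a2, a3 } -> { a0+a1, a2+a3, a0+a1, a2+a3 }
                        -> every lane holds a0+a1+a2+a3.  */
  float32_t
  sum_v4sf (float32x4_t a)
  {
    float32x4_t t = vpaddq_f32 (a, a);	/* first faddp */
    t = vpaddq_f32 (t, t);		/* second faddp */
    return vgetq_lane_f32 (t, 0);
  }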

Comments

James Greenhalgh June 8, 2016, 9:58 a.m. UTC | #1
On Mon, Jun 06, 2016 at 02:40:55PM +0100, Jiong Wang wrote:
> These intrinsics was implemented by inline assembly using "faddp" instruction.
> There was a pattern "aarch64_addpv4sf" which supportsV4SF mode only while we can
> extend this pattern to support VDQF mode, then we can reimplement these
> intrinsics through builtlins.

OK. But watch your ChangeLog format and line length.

Thanks again for this second spin of this patch set. I'm much happier
knowing that we don't have to revisit some of these intrinsics.

Thanks,
James

> 
> gcc/
> 2016-06-06  Jiong Wang<jiong.wang@arm.com>
> 
>         * config/aarch64/aarch64-builtins.def (faddp): New builtins for modes in VDQF.
>         * config/aarch64/aarch64-simd.md (aarch64_faddp<mode>): New.
>         (arch64_addpv4sf): Delete.
>         (reduc_plus_scal_v4sf): Use "gen_aarch64_faddpv4sf" instead of
>         "gen_aarch64_addpv4sf".
>         * config/aarch64/arm_neon.h (vpadd_f32): Remove inline assembly.  Use
>         builtin.
>         (vpadds_f32): Likewise.
>         (vpaddq_f32): Likewise.
>         (vpaddq_f64): Likewise.
>

Patch

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index deab3450ab74fcd6dfcf8267fa9cedfc1423ca4e..1348e7c198763b24d092f774a0ff25e4d0fd1787 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -468,3 +468,6 @@ 
 
   /* Implemented by fabd<mode>3.  */
   BUILTIN_VALLF (BINOP, fabd, 3)
+
+  /* Implemented by aarch64_faddp<mode>.  */
+  BUILTIN_VDQF (BINOP, faddp, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index ad8b9c1d0c155d022be2e7e7c426120b551f3f2b..f8d3e766a53736a4b87ba016caccd085eb793bda 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1992,6 +1992,16 @@ 
   }
 )
 
+(define_insn "aarch64_faddp<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+       (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
+		     (match_operand:VDQF 2 "register_operand" "w")]
+		     UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
+)
+
 (define_insn "aarch64_reduc_plus_internal<mode>"
  [(set (match_operand:VDQV 0 "register_operand" "=w")
        (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
@@ -2019,15 +2029,6 @@ 
   [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
 )
 
-(define_insn "aarch64_addpv4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=w")
-       (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
-		    UNSPEC_FADDV))]
- "TARGET_SIMD"
- "faddp\\t%0.4s, %1.4s, %1.4s"
-  [(set_attr "type" "neon_fp_reduc_add_s_q")]
-)
-
 (define_expand "reduc_plus_scal_v4sf"
  [(set (match_operand:SF 0 "register_operand")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand")]
@@ -2036,8 +2037,8 @@ 
 {
   rtx elt = GEN_INT (ENDIAN_LANE_N (V4SFmode, 0));
   rtx scratch = gen_reg_rtx (V4SFmode);
-  emit_insn (gen_aarch64_addpv4sf (scratch, operands[1]));
-  emit_insn (gen_aarch64_addpv4sf (scratch, scratch));
+  emit_insn (gen_aarch64_faddpv4sf (scratch, operands[1], operands[1]));
+  emit_insn (gen_aarch64_faddpv4sf (scratch, scratch, scratch));
   emit_insn (gen_aarch64_get_lanev4sf (operands[0], scratch, elt));
   DONE;
 })
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 9e966e47789646ed968a081c1fc4cb76b45537af..13a4ab80cf7b0470d8ec8b07e0ed1988f8f4e66d 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8225,17 +8225,6 @@  vpadalq_u32 (uint64x2_t a, uint32x4_t b)
   return result;
 }
 
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpadd_f32 (float32x2_t a, float32x2_t b)
-{
-  float32x2_t result;
-  __asm__ ("faddp %0.2s,%1.2s,%2.2s"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vpaddl_s8 (int8x8_t a)
 {
@@ -8368,28 +8357,6 @@  vpaddlq_u32 (uint32x4_t a)
   return result;
 }
 
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpaddq_f32 (float32x4_t a, float32x4_t b)
-{
-  float32x4_t result;
-  __asm__ ("faddp %0.4s,%1.4s,%2.4s"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpaddq_f64 (float64x2_t a, float64x2_t b)
-{
-  float64x2_t result;
-  __asm__ ("faddp %0.2d,%1.2d,%2.2d"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vpaddq_s8 (int8x16_t a, int8x16_t b)
 {
@@ -8478,17 +8445,6 @@  vpaddq_u64 (uint64x2_t a, uint64x2_t b)
   return result;
 }
 
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpadds_f32 (float32x2_t a)
-{
-  float32_t result;
-  __asm__ ("faddp %s0,%1.2s"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vqdmulh_n_s16 (int16x4_t a, int16_t b)
 {
@@ -18625,6 +18581,24 @@  vnegq_s64 (int64x2_t __a)
 
 /* vpadd  */
 
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_aarch64_faddpv2sf (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_aarch64_faddpv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpaddq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __builtin_aarch64_faddpv2df (__a, __b);
+}
+
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vpadd_s8 (int8x8_t __a, int8x8_t __b)
 {
@@ -18664,6 +18638,12 @@  vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
 						  (int32x2_t) __b);
 }
 
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpadds_f32 (float32x2_t __a)
+{
+  return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
+}
+
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vpaddd_f64 (float64x2_t __a)
 {