
[3/4] Conform vector implementation to ABI -- reorganize reduc_* operations.

Message ID: 528E0E09.4080408@arm.com
State: New

Commit Message

Tejas Belagod Nov. 21, 2013, 1:43 p.m. UTC
Hi,

The attached patch reorganizes reduc_* operations in aarch64-simd.md and fixes 
up lane accesses in arm_neon.h to conform to ABI changes.
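
For illustration, here is the shape of the lane fix-up in arm_neon.h (the macro
and the vaddv_s8 body below are adapted from the patch, with comments added).
On big-endian, architectural element 0, which is where the across-lanes
instructions deposit their scalar result, is the highest-numbered lane in GCC's
element numbering, so the new __LANE0 macro selects lane N-1 there and lane 0
on little-endian:

  #if __AARCH64EB__
  #define __LANE0(__t) ((__t) - 1)   /* Big-endian: last GCC lane.  */
  #else
  #define __LANE0(__t) 0             /* Little-endian: lane 0.  */
  #endif

  __extension__ static __inline int8_t __attribute__ ((__always_inline__))
  vaddv_s8 (int8x8_t __a)
  {
    /* The builtin leaves the sum in architectural element 0; __LANE0 (8)
       maps that to the right GCC lane index before the scalar is
       extracted.  */
    return vget_lane_s8 (__builtin_aarch64_reduc_splus_v8qi (__a), __LANE0 (8));
  }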

Tested for aarch64-none-elf and aarch64_be-none-elf. OK for trunk?

Thanks,
Tejas Belagod
ARM.

2013-11-21  Tejas Belagod  <tejas.belagod@arm.com>

gcc/
	* config/aarch64/aarch64-simd.md (vec_pack_trunc_<mode>,
	vec_pack_trunc_v2df, vec_pack_trunc_df): Swap for big-endian.
	(reduc_<sur>plus_<mode>): Factorize V2DI into this.
	(reduc_<sur>plus_<mode>): Change this to reduc_splus_<mode> for floats
	and also change to float UNSPEC.
	(reduc_<maxmin_uns>_<mode>): Remove V2DI.
	* config/aarch64/arm_neon.h (vaddv<q>_<suf><8,16,32,64>,
	vmaxv<q>_<suf><8,16,32,64>, vminv<q>_<suf><8,16,32,64>): Fix up scalar
	result access for big-endian.
	(__LANE0): New macro used to fix up lane access of 'across-lanes'
	intrinsics for big-endian.
	* config/aarch64/iterators.md (VDQV): Add V2DI.
	(VDQV_S): New.
	(vp): New mode attribute.

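A note on the reduc_splus_v4sf change: the expander used to funnel the first
FADDP result through a scratch register and now simply applies
aarch64_addpv4sf twice, reusing the destination. A rough C-level model of why
two pairwise-add steps are enough follows; the helper names are made up for
the sketch and the FADDP semantics are paraphrased under the assumption that
aarch64_addpv4sf emits a single FADDP of the source with itself, so this is
not code from the patch.

  /* Model of one aarch64_addpv4sf step (faddp vd.4s, vn.4s, vn.4s):
     pairwise sums of the duplicated source vector.  */
  static void
  addp_v4sf (float d[4], const float n[4])
  {
    float t0 = n[0] + n[1], t1 = n[2] + n[3];
    d[0] = t0;  d[1] = t1;  d[2] = t0;  d[3] = t1;
  }

  /* reduc_splus_v4sf now applies the step twice, writing straight into the
     destination instead of allocating a temporary.  */
  static float
  reduc_splus_v4sf_model (const float a[4])
  {
    float d[4];
    addp_v4sf (d, a);   /* d = { a0+a1, a2+a3, a0+a1, a2+a3 }.  */
    addp_v4sf (d, d);   /* Every element now holds a0+a1+a2+a3.  */
    return d[0];
  }
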
Comments

Marcus Shawcroft Nov. 22, 2013, 8:50 a.m. UTC | #1
On 21 November 2013 13:43, Tejas Belagod <tbelagod@arm.com> wrote:
> Hi,
>
> The attached patch reorganizes reduc_* operations in aarch64-simd.md and
> fixes up lane accesses in arm_neon.h to conform to ABI changes.
>
> Tested for aarch64-none-elf and aarch64_be-none-elf. OK for trunk?
>
> Thanks,
> Tejas Belagod
> ARM.
>
> 2013-11-21  Tejas Belagod  <tejas.belagod@arm.com>
>
> gcc/
>         * config/aarch64/aarch64-simd.md (vec_pack_trunc_<mode>,
>         vec_pack_trunc_v2df, vec_pack_trunc_df): Swap for big-endian.
>         (reduc_<sur>plus_<mode>): Factorize V2DI into this.
>         (reduc_<sur>plus_<mode>): Change this to reduc_splus_<mode> for
> floats
>         and also change to float UNSPEC.
>         (reduc_<maxmin_uns>_<mode>): Remove V2DI.
>         * config/aarch64/arm_neon.h (vaddv<q>_<suf><8,16,32,64>,
>         vmaxv<q>_<suf><8,16,32,64>, vminv<q>_<suf><8,16,32,64>): Fix up
> scalar
>         result access for big-endian.
>         (__LANE0): New macro used to fix up lane access of 'across-lanes'
>          intrinsics for big-endian.
>         * config/aarch64/iterators.md (VDQV): Add V2DI.
>         (VDQV_S): New.
>         (vp): New mode attribute.
>

OK /Marcus

Patch

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index b9ebdf5..10443ed 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1519,19 +1519,10 @@ 
        (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
 		    SUADDV))]
  "TARGET_SIMD"
- "addv\\t%<Vetype>0, %1.<Vtype>"
+ "add<VDQV:vp>\\t%<Vetype>0, %1.<Vtype>"
   [(set_attr "type" "neon_reduc_add<q>")]
 )
 
-(define_insn "reduc_<sur>plus_v2di"
- [(set (match_operand:V2DI 0 "register_operand" "=w")
-       (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
-		    SUADDV))]
- "TARGET_SIMD"
- "addp\\t%d0, %1.2d"
-  [(set_attr "type" "neon_reduc_add_q")]
-)
-
 (define_insn "reduc_<sur>plus_v2si"
  [(set (match_operand:V2SI 0 "register_operand" "=w")
        (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
@@ -1541,10 +1532,10 @@ 
   [(set_attr "type" "neon_reduc_add")]
 )
 
-(define_insn "reduc_<sur>plus_<mode>"
+(define_insn "reduc_splus_<mode>"
  [(set (match_operand:V2F 0 "register_operand" "=w")
        (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
-		    SUADDV))]
+		   UNSPEC_FADDV))]
  "TARGET_SIMD"
  "faddp\\t%<Vetype>0, %1.<Vtype>"
   [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
@@ -1559,15 +1550,14 @@ 
   [(set_attr "type" "neon_fp_reduc_add_s_q")]
 )
 
-(define_expand "reduc_<sur>plus_v4sf"
+(define_expand "reduc_splus_v4sf"
  [(set (match_operand:V4SF 0 "register_operand")
        (unspec:V4SF [(match_operand:V4SF 1 "register_operand")]
-		    SUADDV))]
+		    UNSPEC_FADDV))]
  "TARGET_SIMD"
 {
-  rtx tmp = gen_reg_rtx (V4SFmode);
-  emit_insn (gen_aarch64_addpv4sf (tmp, operands[1]));
-  emit_insn (gen_aarch64_addpv4sf (operands[0], tmp));
+  emit_insn (gen_aarch64_addpv4sf (operands[0], operands[1]));
+  emit_insn (gen_aarch64_addpv4sf (operands[0], operands[0]));
   DONE;
 })
 
@@ -1582,23 +1572,14 @@ 
 ;; 'across lanes' max and min ops.
 
 (define_insn "reduc_<maxmin_uns>_<mode>"
- [(set (match_operand:VDQV 0 "register_operand" "=w")
-       (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+ [(set (match_operand:VDQV_S 0 "register_operand" "=w")
+       (unspec:VDQV_S [(match_operand:VDQV_S 1 "register_operand" "w")]
 		    MAXMINV))]
  "TARGET_SIMD"
  "<maxmin_uns_op>v\\t%<Vetype>0, %1.<Vtype>"
   [(set_attr "type" "neon_reduc_minmax<q>")]
 )
 
-(define_insn "reduc_<maxmin_uns>_v2di"
- [(set (match_operand:V2DI 0 "register_operand" "=w")
-       (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
-		    MAXMINV))]
- "TARGET_SIMD"
- "<maxmin_uns_op>p\\t%d0, %1.2d"
-  [(set_attr "type" "neon_reduc_minmax_q")]
-)
-
 (define_insn "reduc_<maxmin_uns>_v2si"
  [(set (match_operand:V2SI 0 "register_operand" "=w")
        (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index c33e4a9..f03d001 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -15913,118 +15913,132 @@  vaddd_u64 (uint64x1_t __a, uint64x1_t __b)
   return __a + __b;
 }
 
+#if __AARCH64EB__
+#define __LANE0(__t) ((__t) - 1)
+#else
+#define __LANE0(__t) 0
+#endif
+
 /* vaddv */
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vaddv_s8 (int8x8_t __a)
 {
-  return vget_lane_s8 (__builtin_aarch64_reduc_splus_v8qi (__a), 0);
+  return vget_lane_s8 (__builtin_aarch64_reduc_splus_v8qi (__a), __LANE0 (8));
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vaddv_s16 (int16x4_t __a)
 {
-  return vget_lane_s16 (__builtin_aarch64_reduc_splus_v4hi (__a), 0);
+  return vget_lane_s16 (__builtin_aarch64_reduc_splus_v4hi (__a), __LANE0 (4));
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vaddv_s32 (int32x2_t __a)
 {
-  return vget_lane_s32 (__builtin_aarch64_reduc_splus_v2si (__a), 0);
+  return vget_lane_s32 (__builtin_aarch64_reduc_splus_v2si (__a), __LANE0 (2));
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
 vaddv_u8 (uint8x8_t __a)
 {
   return vget_lane_u8 ((uint8x8_t)
-		__builtin_aarch64_reduc_uplus_v8qi ((int8x8_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v8qi ((int8x8_t) __a),
+		__LANE0 (8));
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
 vaddv_u16 (uint16x4_t __a)
 {
   return vget_lane_u16 ((uint16x4_t)
-		__builtin_aarch64_reduc_uplus_v4hi ((int16x4_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v4hi ((int16x4_t) __a),
+		__LANE0 (4));
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
 vaddv_u32 (uint32x2_t __a)
 {
   return vget_lane_u32 ((uint32x2_t)
-		__builtin_aarch64_reduc_uplus_v2si ((int32x2_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v2si ((int32x2_t) __a),
+		__LANE0 (2));
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vaddvq_s8 (int8x16_t __a)
 {
-  return vgetq_lane_s8 (__builtin_aarch64_reduc_splus_v16qi (__a), 0);
+  return vgetq_lane_s8 (__builtin_aarch64_reduc_splus_v16qi (__a),
+			__LANE0 (16));
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vaddvq_s16 (int16x8_t __a)
 {
-  return vgetq_lane_s16 (__builtin_aarch64_reduc_splus_v8hi (__a), 0);
+  return vgetq_lane_s16 (__builtin_aarch64_reduc_splus_v8hi (__a), __LANE0 (8));
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vaddvq_s32 (int32x4_t __a)
 {
-  return vgetq_lane_s32 (__builtin_aarch64_reduc_splus_v4si (__a), 0);
+  return vgetq_lane_s32 (__builtin_aarch64_reduc_splus_v4si (__a), __LANE0 (4));
 }
 
 __extension__ static __inline int64_t __attribute__ ((__always_inline__))
 vaddvq_s64 (int64x2_t __a)
 {
-  return vgetq_lane_s64 (__builtin_aarch64_reduc_splus_v2di (__a), 0);
+  return vgetq_lane_s64 (__builtin_aarch64_reduc_splus_v2di (__a), __LANE0 (2));
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
 vaddvq_u8 (uint8x16_t __a)
 {
   return vgetq_lane_u8 ((uint8x16_t)
-		__builtin_aarch64_reduc_uplus_v16qi ((int8x16_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v16qi ((int8x16_t) __a),
+		__LANE0 (16));
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
 vaddvq_u16 (uint16x8_t __a)
 {
   return vgetq_lane_u16 ((uint16x8_t)
-		__builtin_aarch64_reduc_uplus_v8hi ((int16x8_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v8hi ((int16x8_t) __a),
+		__LANE0 (8));
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
 vaddvq_u32 (uint32x4_t __a)
 {
   return vgetq_lane_u32 ((uint32x4_t)
-		__builtin_aarch64_reduc_uplus_v4si ((int32x4_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v4si ((int32x4_t) __a),
+		__LANE0 (4));
 }
 
 __extension__ static __inline uint64_t __attribute__ ((__always_inline__))
 vaddvq_u64 (uint64x2_t __a)
 {
   return vgetq_lane_u64 ((uint64x2_t)
-		__builtin_aarch64_reduc_uplus_v2di ((int64x2_t) __a), 0);
+		__builtin_aarch64_reduc_uplus_v2di ((int64x2_t) __a),
+		__LANE0 (2));
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vaddv_f32 (float32x2_t __a)
 {
-  float32x2_t t = __builtin_aarch64_reduc_splus_v2sf (__a);
-  return vget_lane_f32 (t, 0);
+  float32x2_t __t = __builtin_aarch64_reduc_splus_v2sf (__a);
+  return vget_lane_f32 (__t, __LANE0 (2));
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vaddvq_f32 (float32x4_t __a)
 {
-  float32x4_t t = __builtin_aarch64_reduc_splus_v4sf (__a);
-  return vgetq_lane_f32 (t, 0);
+  float32x4_t __t = __builtin_aarch64_reduc_splus_v4sf (__a);
+  return vgetq_lane_f32 (__t, __LANE0 (4));
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vaddvq_f64 (float64x2_t __a)
 {
-  float64x2_t t = __builtin_aarch64_reduc_splus_v2df (__a);
-  return vgetq_lane_f64 (t, 0);
+  float64x2_t __t = __builtin_aarch64_reduc_splus_v2df (__a);
+  return vgetq_lane_f64 (__t, __LANE0 (2));
 }
 
 /* vcage  */
@@ -20265,97 +20279,106 @@  vmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxv_f32 (float32x2_t __a)
 {
-  return vget_lane_f32 (__builtin_aarch64_reduc_smax_nan_v2sf (__a), 0);
+  return vget_lane_f32 (__builtin_aarch64_reduc_smax_nan_v2sf (__a),
+			__LANE0 (2));
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vmaxv_s8 (int8x8_t __a)
 {
-  return vget_lane_s8 (__builtin_aarch64_reduc_smax_v8qi (__a), 0);
+  return vget_lane_s8 (__builtin_aarch64_reduc_smax_v8qi (__a), __LANE0 (8));
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vmaxv_s16 (int16x4_t __a)
 {
-  return vget_lane_s16 (__builtin_aarch64_reduc_smax_v4hi (__a), 0);
+  return vget_lane_s16 (__builtin_aarch64_reduc_smax_v4hi (__a), __LANE0 (4));
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vmaxv_s32 (int32x2_t __a)
 {
-  return vget_lane_s32 (__builtin_aarch64_reduc_smax_v2si (__a), 0);
+  return vget_lane_s32 (__builtin_aarch64_reduc_smax_v2si (__a), __LANE0 (2));
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
 vmaxv_u8 (uint8x8_t __a)
 {
   return vget_lane_u8 ((uint8x8_t)
-		__builtin_aarch64_reduc_umax_v8qi ((int8x8_t) __a), 0);
+		__builtin_aarch64_reduc_umax_v8qi ((int8x8_t) __a),
+		__LANE0 (8));
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
 vmaxv_u16 (uint16x4_t __a)
 {
   return vget_lane_u16 ((uint16x4_t)
-		__builtin_aarch64_reduc_umax_v4hi ((int16x4_t) __a), 0);
+		__builtin_aarch64_reduc_umax_v4hi ((int16x4_t) __a),
+		__LANE0 (4));
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
 vmaxv_u32 (uint32x2_t __a)
 {
   return vget_lane_u32 ((uint32x2_t)
-		__builtin_aarch64_reduc_umax_v2si ((int32x2_t) __a), 0);
+		__builtin_aarch64_reduc_umax_v2si ((int32x2_t) __a),
+		__LANE0 (2));
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxvq_f32 (float32x4_t __a)
 {
-  return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_nan_v4sf (__a), 0);
+  return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_nan_v4sf (__a),
+			 __LANE0 (4));
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vmaxvq_f64 (float64x2_t __a)
 {
-  return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_nan_v2df (__a), 0);
+  return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_nan_v2df (__a),
+			 __LANE0 (2));
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vmaxvq_s8 (int8x16_t __a)
 {
-  return vgetq_lane_s8 (__builtin_aarch64_reduc_smax_v16qi (__a), 0);
+  return vgetq_lane_s8 (__builtin_aarch64_reduc_smax_v16qi (__a), __LANE0 (16));
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vmaxvq_s16 (int16x8_t __a)
 {
-  return vgetq_lane_s16 (__builtin_aarch64_reduc_smax_v8hi (__a), 0);
+  return vgetq_lane_s16 (__builtin_aarch64_reduc_smax_v8hi (__a), __LANE0 (8));
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vmaxvq_s32 (int32x4_t __a)
 {
-  return vgetq_lane_s32 (__builtin_aarch64_reduc_smax_v4si (__a), 0);
+  return vgetq_lane_s32 (__builtin_aarch64_reduc_smax_v4si (__a), __LANE0 (4));
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
 vmaxvq_u8 (uint8x16_t __a)
 {
   return vgetq_lane_u8 ((uint8x16_t)
-		__builtin_aarch64_reduc_umax_v16qi ((int8x16_t) __a), 0);
+		__builtin_aarch64_reduc_umax_v16qi ((int8x16_t) __a),
+		__LANE0 (16));
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
 vmaxvq_u16 (uint16x8_t __a)
 {
   return vgetq_lane_u16 ((uint16x8_t)
-		__builtin_aarch64_reduc_umax_v8hi ((int16x8_t) __a), 0);
+		__builtin_aarch64_reduc_umax_v8hi ((int16x8_t) __a),
+		__LANE0 (8));
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
 vmaxvq_u32 (uint32x4_t __a)
 {
   return vgetq_lane_u32 ((uint32x4_t)
-		__builtin_aarch64_reduc_umax_v4si ((int32x4_t) __a), 0);
+		__builtin_aarch64_reduc_umax_v4si ((int32x4_t) __a),
+		__LANE0 (4));
 }
 
 /* vmaxnmv  */
@@ -20363,19 +20386,20 @@  vmaxvq_u32 (uint32x4_t __a)
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxnmv_f32 (float32x2_t __a)
 {
-  return vget_lane_f32 (__builtin_aarch64_reduc_smax_v2sf (__a), 0);
+  return vget_lane_f32 (__builtin_aarch64_reduc_smax_v2sf (__a),
+			__LANE0 (2));
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxnmvq_f32 (float32x4_t __a)
 {
-  return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_v4sf (__a), 0);
+  return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_v4sf (__a), __LANE0 (4));
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vmaxnmvq_f64 (float64x2_t __a)
 {
-  return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_v2df (__a), 0);
+  return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_v2df (__a), __LANE0 (2));
 }
 
 /* vmin  */
@@ -20501,97 +20525,107 @@  vminnmq_f64 (float64x2_t __a, float64x2_t __b)
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminv_f32 (float32x2_t __a)
 {
-  return vget_lane_f32 (__builtin_aarch64_reduc_smin_nan_v2sf (__a), 0);
+  return vget_lane_f32 (__builtin_aarch64_reduc_smin_nan_v2sf (__a),
+			__LANE0 (2));
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vminv_s8 (int8x8_t __a)
 {
-  return vget_lane_s8 (__builtin_aarch64_reduc_smin_v8qi (__a), 0);
+  return vget_lane_s8 (__builtin_aarch64_reduc_smin_v8qi (__a),
+		       __LANE0 (8));
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vminv_s16 (int16x4_t __a)
 {
-  return vget_lane_s16 (__builtin_aarch64_reduc_smin_v4hi (__a), 0);
+  return vget_lane_s16 (__builtin_aarch64_reduc_smin_v4hi (__a), __LANE0 (4));
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vminv_s32 (int32x2_t __a)
 {
-  return vget_lane_s32 (__builtin_aarch64_reduc_smin_v2si (__a), 0);
+  return vget_lane_s32 (__builtin_aarch64_reduc_smin_v2si (__a), __LANE0 (2));
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
 vminv_u8 (uint8x8_t __a)
 {
   return vget_lane_u8 ((uint8x8_t)
-		__builtin_aarch64_reduc_umin_v8qi ((int8x8_t) __a), 0);
+		__builtin_aarch64_reduc_umin_v8qi ((int8x8_t) __a),
+		__LANE0 (8));
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
 vminv_u16 (uint16x4_t __a)
 {
   return vget_lane_u16 ((uint16x4_t)
-		__builtin_aarch64_reduc_umin_v4hi ((int16x4_t) __a), 0);
+		__builtin_aarch64_reduc_umin_v4hi ((int16x4_t) __a),
+		__LANE0 (4));
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
 vminv_u32 (uint32x2_t __a)
 {
   return vget_lane_u32 ((uint32x2_t)
-		__builtin_aarch64_reduc_umin_v2si ((int32x2_t) __a), 0);
+		__builtin_aarch64_reduc_umin_v2si ((int32x2_t) __a),
+		__LANE0 (2));
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminvq_f32 (float32x4_t __a)
 {
-  return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_nan_v4sf (__a), 0);
+  return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_nan_v4sf (__a),
+			 __LANE0 (4));
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vminvq_f64 (float64x2_t __a)
 {
-  return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_nan_v2df (__a), 0);
+  return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_nan_v2df (__a),
+			 __LANE0 (2));
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vminvq_s8 (int8x16_t __a)
 {
-  return vgetq_lane_s8 (__builtin_aarch64_reduc_smin_v16qi (__a), 0);
+  return vgetq_lane_s8 (__builtin_aarch64_reduc_smin_v16qi (__a), __LANE0 (16));
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vminvq_s16 (int16x8_t __a)
 {
-  return vgetq_lane_s16 (__builtin_aarch64_reduc_smin_v8hi (__a), 0);
+  return vgetq_lane_s16 (__builtin_aarch64_reduc_smin_v8hi (__a), __LANE0 (8));
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vminvq_s32 (int32x4_t __a)
 {
-  return vgetq_lane_s32 (__builtin_aarch64_reduc_smin_v4si (__a), 0);
+  return vgetq_lane_s32 (__builtin_aarch64_reduc_smin_v4si (__a), __LANE0 (4));
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
 vminvq_u8 (uint8x16_t __a)
 {
   return vgetq_lane_u8 ((uint8x16_t)
-		__builtin_aarch64_reduc_umin_v16qi ((int8x16_t) __a), 0);
+		__builtin_aarch64_reduc_umin_v16qi ((int8x16_t) __a),
+		__LANE0 (16));
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
 vminvq_u16 (uint16x8_t __a)
 {
   return vgetq_lane_u16 ((uint16x8_t)
-		__builtin_aarch64_reduc_umin_v8hi ((int16x8_t) __a), 0);
+		__builtin_aarch64_reduc_umin_v8hi ((int16x8_t) __a),
+		__LANE0 (8));
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
 vminvq_u32 (uint32x4_t __a)
 {
   return vgetq_lane_u32 ((uint32x4_t)
-		__builtin_aarch64_reduc_umin_v4si ((int32x4_t) __a), 0);
+		__builtin_aarch64_reduc_umin_v4si ((int32x4_t) __a),
+		__LANE0 (4));
 }
 
 /* vminnmv  */
@@ -20599,19 +20633,19 @@  vminvq_u32 (uint32x4_t __a)
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminnmv_f32 (float32x2_t __a)
 {
-  return vget_lane_f32 (__builtin_aarch64_reduc_smin_v2sf (__a), 0);
+  return vget_lane_f32 (__builtin_aarch64_reduc_smin_v2sf (__a), __LANE0 (2));
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminnmvq_f32 (float32x4_t __a)
 {
-  return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_v4sf (__a), 0);
+  return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_v4sf (__a), __LANE0 (4));
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vminnmvq_f64 (float64x2_t __a)
 {
-  return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_v2df (__a), 0);
+  return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_v2df (__a), __LANE0 (2));
 }
 
 /* vmla */
@@ -25444,6 +25478,8 @@  __INTERLEAVE_LIST (zip)
 
 /* End of optimal implementations in approved order.  */
 
+#undef __LANE0
+
 #undef __aarch64_vget_lane_any
 #undef __aarch64_vget_lane_f32
 #undef __aarch64_vget_lane_f64
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 50bdac9..fd7152c 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -108,7 +108,10 @@ 
 (define_mode_iterator VALLDI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF DI])
 
 ;; Vector modes for Integer reduction across lanes.
-(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI])
+(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
+
+;; Vector modes (except V2DI) for Integer reduction across lanes.
+(define_mode_iterator VDQV_S [V8QI V16QI V4HI V8HI V4SI])
 
 ;; All double integer narrow-able modes.
 (define_mode_iterator VDN [V4HI V2SI DI])
@@ -585,6 +588,12 @@ 
 			       (V2DF  "_q")
 		     (QI "") (HI "") (SI "") (DI "") (SF "") (DF "")])
 
+(define_mode_attr vp [(V8QI "v") (V16QI "v")
+		      (V4HI "v") (V8HI  "v")
+		      (V2SI "p") (V4SI  "v")
+		      (V2DI  "p") (V2DF  "p")
+		      (V2SF "p") (V4SF  "v")])
+
 ;; -------------------------------------------------------------------
 ;; Code Iterators
 ;; -------------------------------------------------------------------