
[1/4,AArch64] Fix ICE on non-constant indices to __builtin_aarch64_im_lane_boundsi

Message ID 54819CE1.2050508@arm.com
State New

Commit Message

Alan Lawrence Dec. 5, 2014, 11:54 a.m. UTC
When the lane index passed to e.g. vset_lane_xxx is not a constant, we currently
get an ICE:

In file included from 
gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c:6:0:
/work/alalaw01/oban/buildfsf-aarch64-none-elf/install/lib/gcc/aarch64-none-elf/5.0.0/include/arm_neon.h: 
In function 'main':
/work/alalaw01/oban/buildfsf-aarch64-none-elf/install/lib/gcc/aarch64-none-elf/5.0.0/include/arm_neon.h:4280:10: 
internal compiler error: in aarch64_simd_lane_bounds, at 
config/aarch64/aarch64.c:8410
return __aarch64_vset_lane_any (__elem, __vec, __index);
   ^
0x100e0f1 aarch64_simd_lane_bounds(rtx_def*, long, long, tree_node const*)
   /work/alalaw01/oban/srcfsf/gcc/gcc/config/aarch64/aarch64.c:8410
0x107b279 gen_aarch64_im_lane_boundsi(rtx_def*, rtx_def*)
   /work/alalaw01/oban/srcfsf/gcc/gcc/config/aarch64/aarch64-simd.md:4560
0x7fc50e insn_gen_fn::operator()(rtx_def*, rtx_def*) const
   /work/alalaw01/oban/srcfsf/gcc/gcc/recog.h:303
0x10142f5 aarch64_simd_expand_args
   /work/alalaw01/oban/srcfsf/gcc/gcc/config/aarch64/aarch64-builtins.c:970
0x1014692 aarch64_simd_expand_builtin(int, tree_node*, rtx_def*)
   /work/alalaw01/oban/srcfsf/gcc/gcc/config/aarch64/aarch64-builtins.c:1051
0x1014bb0 aarch64_expand_builtin(tree_node*, rtx_def*, rtx_def*, machine_mode, int)
   /work/alalaw01/oban/srcfsf/gcc/gcc/config/aarch64/aarch64-builtins.c:1133
0x7683d6 expand_builtin(tree_node*, rtx_def*, rtx_def*, machine_mode, int)
   /work/alalaw01/oban/srcfsf/gcc/gcc/builtins.c:5912

Code with a non-constant lane index is invalid, but this patch improves the 
handling and error message to the following:

In file included from 
gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c:6:0:
In function 'vset_lane_s16',
   inlined from 'main' at 
gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c:13:13:
/work/alalaw01/oban/buildfsf-aarch64-none-elf/install/lib/gcc/aarch64-none-elf/5.0.0/include/arm_neon.h:4281:10: 
error: lane index must be a constant immediate
return __aarch64_vset_lane_any (__elem, __vec, __index);

Unfortunately the source line printed is from arm_neon.h, but the diagnostic at
least gives the user's source location (here vset_lane_s16_const_1.c:13:13), and
it isn't an ICE ;).

The technique is to remove the aarch64_im_lane_boundsi expander and to handle the
builtin as a special case in aarch64_simd_expand_builtin, where the tree (which
records the inlining history) is available. This allows the old pattern and its
associated bits to be removed.
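
In outline, the new expand-time handling is (condensed from the
aarch64-builtins.c hunk in the patch below):

  if (fcode == AARCH64_SIMD_BUILTIN_LANE_CHECK)
    {
      /* Operand 0 is the number of lanes (always a constant, supplied by the
         arm_neon.h macro); operand 1 is the user's lane index.  */
      tree nlanes = CALL_EXPR_ARG (exp, 0);
      rtx lane_idx = expand_normal (CALL_EXPR_ARG (exp, 1));
      if (CONST_INT_P (lane_idx))
        aarch64_simd_lane_bounds (lane_idx, 0, TREE_INT_CST_LOW (nlanes), exp);
      else
        error ("%Klane index must be a constant immediate", exp);
      /* The check generates no RTL of its own.  */
      return const0_rtx;
    }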

Also, replace the hand-coded lane count in all of arm_neon.h's calls to
__builtin_aarch64_im_lane_boundsi with a count computed automatically via sizeof.
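
For illustration only (this snippet is not part of the patch, and of course only
compiles for aarch64), the sizeof-based computation gives the expected lane
counts:

  #include <arm_neon.h>
  #include <assert.h>

  int
  main (void)
  {
    int16x4_t v4 = vdup_n_s16 (0);
    int16x8_t v8 = vdupq_n_s16 (0);
    /* Same computation as arm_neon.h's __AARCH64_NUM_LANES macro:
       sizeof (vec) / sizeof (vec[0]).  */
    assert (sizeof (v4) / sizeof (v4[0]) == 4);  /* 8 bytes / 2 bytes.  */
    assert (sizeof (v8) / sizeof (v8[0]) == 8);  /* 16 bytes / 2 bytes.  */
    return 0;
  }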

gcc/ChangeLog:

	* config/aarch64/aarch64-builtins.c (aarch64_types_binopv_qualifiers,
	TYPES_BINOPV): Delete.
	(enum aarch64_builtins): Add AARCH64_SIMD_BUILTIN_LANE_CHECK and
	AARCH64_SIMD_PATTERN_START.
	(aarch64_init_simd_builtins): Register
	__builtin_aarch64_im_lane_boundsi; use  AARCH64_SIMD_PATTERN_START.
	(aarch64_simd_expand_builtin): Handle AARCH64_SIMD_BUILTIN_LANE_CHECK;
	use AARCH64_SIMD_PATTERN_START.

	* config/aarch64/aarch64-simd.md (aarch64_im_lane_boundsi): Delete.
	* config/aarch64/aarch64-simd-builtins.def (im_lane_bound): Delete.

	* config/aarch64/arm_neon.h (__AARCH64_LANE_CHECK): New.
	(__aarch64_vget_lane_f64, __aarch64_vget_lane_s64,
	__aarch64_vget_lane_u64, __aarch64_vset_lane_any, vdupd_lane_f64,
	vdupd_lane_s64, vdupd_lane_u64, vext_f32, vext_f64, vext_p8, vext_p16,
	vext_s8, vext_s16, vext_s32, vext_s64, vext_u8, vext_u16, vext_u32,
	vext_u64, vextq_f32, vextq_f64, vextq_p8, vextq_p16, vextq_s8,
	vextq_s16, vextq_s32, vextq_s64, vextq_u8, vextq_u16, vextq_u32,
	vextq_u64, vmulq_lane_f64): Use __AARCH64_LANE_CHECK.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/simd/vset_lane_s16_const_1.c: New test.

Comments

Marcus Shawcroft Dec. 5, 2014, 6:23 p.m. UTC | #1
On 5 December 2014 at 11:54, Alan Lawrence <alan.lawrence@arm.com> wrote:

> gcc/ChangeLog:
>
>         * config/aarch64/aarch64-builtins.c (aarch64_types_binopv_qualifiers,
>         TYPES_BINOPV): Delete.
>         (enum aarch64_builtins): Add AARCH64_SIMD_BUILTIN_LANE_CHECK and
>         AARCH64_SIMD_PATTERN_START.
>         (aarch64_init_simd_builtins): Register
>         __builtin_aarch64_im_lane_boundsi; use  AARCH64_SIMD_PATTERN_START.
>         (aarch64_simd_expand_builtin): Handle AARCH64_SIMD_BUILTIN_LANE_CHECK;
>         use AARCH64_SIMD_PATTERN_START.
>
>         * config/aarch64/aarch64-simd.md (aarch64_im_lane_boundsi): Delete.
>         * config/aarch64/aarch64-simd-builtins.def (im_lane_bound): Delete.
>
>         * config/aarch64/arm_neon.h (__AARCH64_LANE_CHECK): New.
>         (__aarch64_vget_lane_f64, __aarch64_vget_lane_s64,
>         __aarch64_vget_lane_u64, __aarch64_vset_lane_any, vdupd_lane_f64,
>         vdupd_lane_s64, vdupd_lane_u64, vext_f32, vext_f64, vext_p8, vext_p16,
>         vext_s8, vext_s16, vext_s32, vext_s64, vext_u8, vext_u16, vext_u32,
>         vext_u64, vextq_f32, vextq_f64, vextq_p8, vextq_p16, vextq_s8,
>         vextq_s16, vextq_s32, vextq_s64, vextq_u8, vextq_u16, vextq_u32,
>         vextq_u64, vmulq_lane_f64): Use __AARCH64_LANE_CHECK.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/aarch64/simd/vset_lane_s16_const_1.c: New test.

OK /Marcus

Patch

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index e9c4c85fd3f1dbbb81d306bbab79409034261dc3..8aceeb4cabee65b1725deb5b848312a8bc73f973 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -143,10 +143,6 @@  aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_none, qualifier_maybe_immediate };
 #define TYPES_BINOP (aarch64_types_binop_qualifiers)
 static enum aarch64_type_qualifiers
-aarch64_types_binopv_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_void, qualifier_none, qualifier_none };
-#define TYPES_BINOPV (aarch64_types_binopv_qualifiers)
-static enum aarch64_type_qualifiers
 aarch64_types_binopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned };
 #define TYPES_BINOPU (aarch64_types_binopu_qualifiers)
@@ -344,9 +340,12 @@  enum aarch64_builtins
   AARCH64_BUILTIN_SET_FPSR,
 
   AARCH64_SIMD_BUILTIN_BASE,
+  AARCH64_SIMD_BUILTIN_LANE_CHECK,
 #include "aarch64-simd-builtins.def"
-  AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE
-			      + ARRAY_SIZE (aarch64_simd_builtin_data),
+  /* The first enum element which is based on an insn_data pattern.  */
+  AARCH64_SIMD_PATTERN_START = AARCH64_SIMD_BUILTIN_LANE_CHECK + 1,
+  AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_PATTERN_START
+			      + ARRAY_SIZE (aarch64_simd_builtin_data) - 1,
   AARCH64_CRC32_BUILTIN_BASE,
   AARCH64_CRC32_BUILTINS
   AARCH64_CRC32_BUILTIN_MAX,
@@ -687,7 +686,7 @@  aarch64_init_simd_builtin_scalar_types (void)
 static void
 aarch64_init_simd_builtins (void)
 {
-  unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
+  unsigned int i, fcode = AARCH64_SIMD_PATTERN_START;
 
   aarch64_init_simd_builtin_types ();
 
@@ -697,6 +696,15 @@  aarch64_init_simd_builtins (void)
      system.  */
   aarch64_init_simd_builtin_scalar_types ();
  
+  tree lane_check_fpr = build_function_type_list (void_type_node,
+						  intSI_type_node,
+						  intSI_type_node,
+						  NULL);
+  aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_LANE_CHECK] =
+      add_builtin_function ("__builtin_aarch64_im_lane_boundsi", lane_check_fpr,
+			    AARCH64_SIMD_BUILTIN_LANE_CHECK, BUILT_IN_MD,
+			    NULL, NULL_TREE);
+
   for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
     {
       bool print_type_signature_p = false;
@@ -998,8 +1006,20 @@  aarch64_simd_expand_args (rtx target, int icode, int have_retval,
 rtx
 aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
 {
+  if (fcode == AARCH64_SIMD_BUILTIN_LANE_CHECK)
+    {
+      tree nlanes = CALL_EXPR_ARG (exp, 0);
+      gcc_assert (TREE_CODE (nlanes) == INTEGER_CST);
+      rtx lane_idx = expand_normal (CALL_EXPR_ARG (exp, 1));
+      if (CONST_INT_P (lane_idx))
+	aarch64_simd_lane_bounds (lane_idx, 0, TREE_INT_CST_LOW (nlanes), exp);
+      else
+	error ("%Klane index must be a constant immediate", exp);
+      /* Don't generate any RTL.  */
+      return const0_rtx;
+    }
   aarch64_simd_builtin_datum *d =
-		&aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
+		&aarch64_simd_builtin_data[fcode - AARCH64_SIMD_PATTERN_START];
   enum insn_code icode = d->code;
   builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
   int num_args = insn_data[d->code].n_operands;
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 545c7da935e1338951c006c71e29a921c5613f33..1872445543dd3a4fc044c7a7335a335b91179f88 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -386,5 +386,3 @@ 
   VAR1 (BINOPP, crypto_pmull, 0, di)
   VAR1 (BINOPP, crypto_pmull, 0, v2di)
 
-  /* Meta-op to check lane bounds of immediate in aarch64_expand_builtin.  */
-  VAR1 (BINOPV, im_lane_bound, 0, si)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 23345b1df1ebb28075edd2effd5f327749abd61d..2656814e53d3ab2e9fd44bf800dd6316191e2071 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4549,19 +4549,6 @@ 
   [(set_attr "type" "neon_ext<q>")]
 )
 
-;; This exists solely to check the arguments to the corresponding __builtin.
-;; Used where we want an error for out-of-range indices which would otherwise
-;; be silently wrapped (e.g. the mask to a __builtin_shuffle).
-(define_expand "aarch64_im_lane_boundsi"
-  [(match_operand:SI 0 "immediate_operand" "i")
-   (match_operand:SI 1 "immediate_operand" "i")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_lane_bounds (operands[0], 0, INTVAL (operands[1]), NULL);
-  DONE;
-}
-)
-
 (define_insn "aarch64_rev<REVERSE:rev_op><mode>"
   [(set (match_operand:VALL 0 "register_operand" "=w")
 	(unspec:VALL [(match_operand:VALL 1 "register_operand" "w")]
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 1291a8d4d002e533e31912d3e1ffb5e819aa9e5c..409cb8aae400392f5112bcf8f9847170c2cc9c78 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -436,7 +436,7 @@  typedef struct poly16x8x4_t
   __aarch64_vget_lane_any (v2sf, , , __a, __b)
 #define __aarch64_vget_lane_f64(__a, __b) __extension__	\
   ({							\
-    __builtin_aarch64_im_lane_boundsi (__b, 1);		\
+    __AARCH64_LANE_CHECK (__a, __b);		\
     __a[0];						\
   })
 
@@ -453,7 +453,7 @@  typedef struct poly16x8x4_t
   __aarch64_vget_lane_any (v2si, , ,__a, __b)
 #define __aarch64_vget_lane_s64(__a, __b) __extension__	\
   ({							\
-    __builtin_aarch64_im_lane_boundsi (__b, 1);		\
+    __AARCH64_LANE_CHECK (__a, __b);		\
     __a[0];						\
   })
 
@@ -465,7 +465,7 @@  typedef struct poly16x8x4_t
   __aarch64_vget_lane_any (v2si, (uint32_t), (int32x2_t), __a, __b)
 #define __aarch64_vget_lane_u64(__a, __b) __extension__	\
   ({							\
-    __builtin_aarch64_im_lane_boundsi (__b, 1);		\
+    __AARCH64_LANE_CHECK (__a, __b);		\
     __a[0];						\
   })
 
@@ -607,6 +607,8 @@  typedef struct poly16x8x4_t
 /* Internal macro for lane indices.  */
 
 #define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
+#define __AARCH64_LANE_CHECK(__vec, __idx)	\
+	__builtin_aarch64_im_lane_boundsi (__AARCH64_NUM_LANES (__vec), __idx)
 
 /* For big-endian, GCC's vector indices are the opposite way around
    to the architectural lane indices used by Neon intrinsics.  */
@@ -621,8 +623,7 @@  typedef struct poly16x8x4_t
 #define __aarch64_vset_lane_any(__elem, __vec, __index)			\
   __extension__								\
   ({									\
-    __builtin_aarch64_im_lane_boundsi (__index,			\
-       __AARCH64_NUM_LANES (__vec));					\
+    __AARCH64_LANE_CHECK (__vec, __index);				\
     __vec[__aarch64_lane (__vec, __index)] = __elem;			\
     __vec;								\
   })
@@ -15096,21 +15097,21 @@  vdups_lane_u32 (uint32x2_t __a, const int __b)
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vdupd_lane_f64 (float64x1_t __a, const int __b)
 {
-  __builtin_aarch64_im_lane_boundsi (__b, 1);
+  __AARCH64_LANE_CHECK (__a, __b);
   return __a[0];
 }
 
 __extension__ static __inline int64_t __attribute__ ((__always_inline__))
 vdupd_lane_s64 (int64x1_t __a, const int __b)
 {
-  __builtin_aarch64_im_lane_boundsi (__b, 1);
+  __AARCH64_LANE_CHECK (__a, __b);
   return __a[0];
 }
 
 __extension__ static __inline uint64_t __attribute__ ((__always_inline__))
 vdupd_lane_u64 (uint64x1_t __a, const int __b)
 {
-  __builtin_aarch64_im_lane_boundsi (__b, 1);
+  __AARCH64_LANE_CHECK (__a, __b);
   return __a[0];
 }
 
@@ -15195,7 +15196,7 @@  vdupd_laneq_u64 (uint64x2_t __a, const int __b)
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
 #else
@@ -15206,14 +15207,14 @@  vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
 __extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
 vext_f64 (float64x1_t __a, float64x1_t __b, __const int __c)
 {
+  __AARCH64_LANE_CHECK (__a, __c);
   /* The only possible index to the assembler instruction returns element 0.  */
-  __builtin_aarch64_im_lane_boundsi (__c, 1);
   return __a;
 }
 __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
 vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -15226,7 +15227,7 @@  vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
 __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
 vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
       (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@@ -15238,7 +15239,7 @@  vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -15251,7 +15252,7 @@  vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
       (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@@ -15263,7 +15264,7 @@  vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
 vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
 #else
@@ -15274,15 +15275,15 @@  vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c)
 {
+  __AARCH64_LANE_CHECK (__a, __c);
   /* The only possible index to the assembler instruction returns element 0.  */
-  __builtin_aarch64_im_lane_boundsi (__c, 1);
   return __a;
 }
 
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -15295,7 +15296,7 @@  vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
 vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
       (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@@ -15307,7 +15308,7 @@  vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
 vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
 #else
@@ -15318,15 +15319,15 @@  vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c)
 {
+  __AARCH64_LANE_CHECK (__a, __c);
   /* The only possible index to the assembler instruction returns element 0.  */
-  __builtin_aarch64_im_lane_boundsi (__c, 1);
   return __a;
 }
 
 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
 vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
       (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@@ -15338,7 +15339,7 @@  vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
 #else
@@ -15349,7 +15350,7 @@  vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
 __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
 vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 16);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x16_t)
       {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
@@ -15364,7 +15365,7 @@  vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
 __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
 vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint16x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -15377,7 +15378,7 @@  vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 16);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x16_t)
       {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
@@ -15392,7 +15393,7 @@  vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint16x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -15405,7 +15406,7 @@  vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
       (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@@ -15417,7 +15418,7 @@  vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
 #else
@@ -15428,7 +15429,7 @@  vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 16);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x16_t)
       {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
@@ -15443,7 +15444,7 @@  vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint16x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -15456,7 +15457,7 @@  vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
       (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@@ -15468,7 +15469,7 @@  vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
 __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
 vextq_u64 (uint64x2_t __a, uint64x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
 #else
@@ -19242,7 +19243,7 @@  vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane)
 {
-  __builtin_aarch64_im_lane_boundsi (__lane, 1);
+  __AARCH64_LANE_CHECK (__a, __lane);
   return __a * __b[0];
 }
 
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c
new file mode 100644
index 0000000000000000000000000000000000000000..b28d67f74b076412e5dc4982449735aa227322bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c
@@ -0,0 +1,15 @@ 
+/* Test error message when passing a non-constant value in as a lane index.  */
+
+/* { dg-do assemble } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_neon.h>
+
+int
+main (int argc, char **argv)
+{
+  int16x4_t in = vcreate_s16 (0xdeadbeef00000000ULL);
+  /* { dg-error "must be a constant immediate" "" { target *-*-* } 0 } */
+  int16x4_t out = vset_lane_s16 (65535, in, argc);
+  return vget_lane_s16 (out, 0);
+}