diff mbox series

[v2,ARM,7x] : MVE vreinterpretq and vuninitializedq intrinsics.

Message ID AM0PR08MB53802F1F446BFB261487EEAD9BF40@AM0PR08MB5380.eurprd08.prod.outlook.com
State New
Headers show
Series [v2,ARM,7x] : MVE vreinterpretq and vuninitializedq intrinsics. | expand

Commit Message

Srinath Parvathaneni March 19, 2020, 5:59 p.m. UTC
Hello Kyrill,

This patch addresses all the comments in patch version v2.
(version v2) https://gcc.gnu.org/pipermail/gcc-patches/2019-November/534351.html

####

Hello,

This patch supports the following MVE ACLE intrinsics.

vreinterpretq_s16_s32, vreinterpretq_s16_s64, vreinterpretq_s16_s8, vreinterpretq_s16_u16,
vreinterpretq_s16_u32, vreinterpretq_s16_u64, vreinterpretq_s16_u8, vreinterpretq_s32_s16,
vreinterpretq_s32_s64, vreinterpretq_s32_s8, vreinterpretq_s32_u16, vreinterpretq_s32_u32,
vreinterpretq_s32_u64, vreinterpretq_s32_u8, vreinterpretq_s64_s16, vreinterpretq_s64_s32,
vreinterpretq_s64_s8, vreinterpretq_s64_u16, vreinterpretq_s64_u32, vreinterpretq_s64_u64,
vreinterpretq_s64_u8, vreinterpretq_s8_s16, vreinterpretq_s8_s32, vreinterpretq_s8_s64,
vreinterpretq_s8_u16, vreinterpretq_s8_u32, vreinterpretq_s8_u64, vreinterpretq_s8_u8,
vreinterpretq_u16_s16, vreinterpretq_u16_s32, vreinterpretq_u16_s64, vreinterpretq_u16_s8,
vreinterpretq_u16_u32, vreinterpretq_u16_u64, vreinterpretq_u16_u8, vreinterpretq_u32_s16,
vreinterpretq_u32_s32, vreinterpretq_u32_s64, vreinterpretq_u32_s8, vreinterpretq_u32_u16,
vreinterpretq_u32_u64, vreinterpretq_u32_u8, vreinterpretq_u64_s16, vreinterpretq_u64_s32,
vreinterpretq_u64_s64, vreinterpretq_u64_s8, vreinterpretq_u64_u16, vreinterpretq_u64_u32,
vreinterpretq_u64_u8, vreinterpretq_u8_s16, vreinterpretq_u8_s32, vreinterpretq_u8_s64,
vreinterpretq_u8_s8, vreinterpretq_u8_u16, vreinterpretq_u8_u32, vreinterpretq_u8_u64,
vreinterpretq_s32_f16, vreinterpretq_s32_f32, vreinterpretq_u16_f16, vreinterpretq_u16_f32,
vreinterpretq_u32_f16, vreinterpretq_u32_f32, vreinterpretq_u64_f16, vreinterpretq_u64_f32,
vreinterpretq_u8_f16, vreinterpretq_u8_f32, vreinterpretq_f16_f32, vreinterpretq_f16_s16,
vreinterpretq_f16_s32, vreinterpretq_f16_s64, vreinterpretq_f16_s8, vreinterpretq_f16_u16,
vreinterpretq_f16_u32, vreinterpretq_f16_u64, vreinterpretq_f16_u8, vreinterpretq_f32_f16,
vreinterpretq_f32_s16, vreinterpretq_f32_s32, vreinterpretq_f32_s64, vreinterpretq_f32_s8,
vreinterpretq_f32_u16, vreinterpretq_f32_u32, vreinterpretq_f32_u64, vreinterpretq_f32_u8,
vreinterpretq_s16_f16, vreinterpretq_s16_f32, vreinterpretq_s64_f16, vreinterpretq_s64_f32,
vreinterpretq_s8_f16, vreinterpretq_s8_f32, vuninitializedq_u8, vuninitializedq_u16,
vuninitializedq_u32, vuninitializedq_u64, vuninitializedq_s8, vuninitializedq_s16,
vuninitializedq_s32, vuninitializedq_s64, vuninitializedq_f16, vuninitializedq_f32 and
vuninitializedq.

Please refer to the M-profile Vector Extension (MVE) intrinsics [1] for more details.
[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics

Regression tested on arm-none-eabi and found no regressions.

Ok for trunk?

Thanks,
Srinath.

gcc/ChangeLog:

2020-03-19  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>

	* config/arm/arm_mve.h (vreinterpretq_s16_s32): Define macro.
	(vreinterpretq_s16_s64): Likewise.
	(vreinterpretq_s16_s8): Likewise.
	(vreinterpretq_s16_u16): Likewise.
	(vreinterpretq_s16_u32): Likewise.
	(vreinterpretq_s16_u64): Likewise.
	(vreinterpretq_s16_u8): Likewise.
	(vreinterpretq_s32_s16): Likewise.
	(vreinterpretq_s32_s64): Likewise.
	(vreinterpretq_s32_s8): Likewise.
	(vreinterpretq_s32_u16): Likewise.
	(vreinterpretq_s32_u32): Likewise.
	(vreinterpretq_s32_u64): Likewise.
	(vreinterpretq_s32_u8): Likewise.
	(vreinterpretq_s64_s16): Likewise.
	(vreinterpretq_s64_s32): Likewise.
	(vreinterpretq_s64_s8): Likewise.
	(vreinterpretq_s64_u16): Likewise.
	(vreinterpretq_s64_u32): Likewise.
	(vreinterpretq_s64_u64): Likewise.
	(vreinterpretq_s64_u8): Likewise.
	(vreinterpretq_s8_s16): Likewise.
	(vreinterpretq_s8_s32): Likewise.
	(vreinterpretq_s8_s64): Likewise.
	(vreinterpretq_s8_u16): Likewise.
	(vreinterpretq_s8_u32): Likewise.
	(vreinterpretq_s8_u64): Likewise.
	(vreinterpretq_s8_u8): Likewise.
	(vreinterpretq_u16_s16): Likewise.
	(vreinterpretq_u16_s32): Likewise.
	(vreinterpretq_u16_s64): Likewise.
	(vreinterpretq_u16_s8): Likewise.
	(vreinterpretq_u16_u32): Likewise.
	(vreinterpretq_u16_u64): Likewise.
	(vreinterpretq_u16_u8): Likewise.
	(vreinterpretq_u32_s16): Likewise.
	(vreinterpretq_u32_s32): Likewise.
	(vreinterpretq_u32_s64): Likewise.
	(vreinterpretq_u32_s8): Likewise.
	(vreinterpretq_u32_u16): Likewise.
	(vreinterpretq_u32_u64): Likewise.
	(vreinterpretq_u32_u8): Likewise.
	(vreinterpretq_u64_s16): Likewise.
	(vreinterpretq_u64_s32): Likewise.
	(vreinterpretq_u64_s64): Likewise.
	(vreinterpretq_u64_s8): Likewise.
	(vreinterpretq_u64_u16): Likewise.
	(vreinterpretq_u64_u32): Likewise.
	(vreinterpretq_u64_u8): Likewise.
	(vreinterpretq_u8_s16): Likewise.
	(vreinterpretq_u8_s32): Likewise.
	(vreinterpretq_u8_s64): Likewise.
	(vreinterpretq_u8_s8): Likewise.
	(vreinterpretq_u8_u16): Likewise.
	(vreinterpretq_u8_u32): Likewise.
	(vreinterpretq_u8_u64): Likewise.
	(vreinterpretq_s32_f16): Likewise.
	(vreinterpretq_s32_f32): Likewise.
	(vreinterpretq_u16_f16): Likewise.
	(vreinterpretq_u16_f32): Likewise.
	(vreinterpretq_u32_f16): Likewise.
	(vreinterpretq_u32_f32): Likewise.
	(vreinterpretq_u64_f16): Likewise.
	(vreinterpretq_u64_f32): Likewise.
	(vreinterpretq_u8_f16): Likewise.
	(vreinterpretq_u8_f32): Likewise.
	(vreinterpretq_f16_f32): Likewise.
	(vreinterpretq_f16_s16): Likewise.
	(vreinterpretq_f16_s32): Likewise.
	(vreinterpretq_f16_s64): Likewise.
	(vreinterpretq_f16_s8): Likewise.
	(vreinterpretq_f16_u16): Likewise.
	(vreinterpretq_f16_u32): Likewise.
	(vreinterpretq_f16_u64): Likewise.
	(vreinterpretq_f16_u8): Likewise.
	(vreinterpretq_f32_f16): Likewise.
	(vreinterpretq_f32_s16): Likewise.
	(vreinterpretq_f32_s32): Likewise.
	(vreinterpretq_f32_s64): Likewise.
	(vreinterpretq_f32_s8): Likewise.
	(vreinterpretq_f32_u16): Likewise.
	(vreinterpretq_f32_u32): Likewise.
	(vreinterpretq_f32_u64): Likewise.
	(vreinterpretq_f32_u8): Likewise.
	(vreinterpretq_s16_f16): Likewise.
	(vreinterpretq_s16_f32): Likewise.
	(vreinterpretq_s64_f16): Likewise.
	(vreinterpretq_s64_f32): Likewise.
	(vreinterpretq_s8_f16): Likewise.
	(vreinterpretq_s8_f32): Likewise.
	(vuninitializedq_u8): Likewise.
	(vuninitializedq_u16): Likewise.
	(vuninitializedq_u32): Likewise.
	(vuninitializedq_u64): Likewise.
	(vuninitializedq_s8): Likewise.
	(vuninitializedq_s16): Likewise.
	(vuninitializedq_s32): Likewise.
	(vuninitializedq_s64): Likewise.
	(vuninitializedq_f16): Likewise.
	(vuninitializedq_f32): Likewise.
	(__arm_vuninitializedq_u8): Define intrinsic.
	(__arm_vuninitializedq_u16): Likewise.
	(__arm_vuninitializedq_u32): Likewise.
	(__arm_vuninitializedq_u64): Likewise.
	(__arm_vuninitializedq_s8): Likewise.
	(__arm_vuninitializedq_s16): Likewise.
	(__arm_vuninitializedq_s32): Likewise.
	(__arm_vuninitializedq_s64): Likewise.
	(__arm_vreinterpretq_s16_s32): Likewise.
	(__arm_vreinterpretq_s16_s64): Likewise.
	(__arm_vreinterpretq_s16_s8): Likewise.
	(__arm_vreinterpretq_s16_u16): Likewise.
	(__arm_vreinterpretq_s16_u32): Likewise.
	(__arm_vreinterpretq_s16_u64): Likewise.
	(__arm_vreinterpretq_s16_u8): Likewise.
	(__arm_vreinterpretq_s32_s16): Likewise.
	(__arm_vreinterpretq_s32_s64): Likewise.
	(__arm_vreinterpretq_s32_s8): Likewise.
	(__arm_vreinterpretq_s32_u16): Likewise.
	(__arm_vreinterpretq_s32_u32): Likewise.
	(__arm_vreinterpretq_s32_u64): Likewise.
	(__arm_vreinterpretq_s32_u8): Likewise.
	(__arm_vreinterpretq_s64_s16): Likewise.
	(__arm_vreinterpretq_s64_s32): Likewise.
	(__arm_vreinterpretq_s64_s8): Likewise.
	(__arm_vreinterpretq_s64_u16): Likewise.
	(__arm_vreinterpretq_s64_u32): Likewise.
	(__arm_vreinterpretq_s64_u64): Likewise.
	(__arm_vreinterpretq_s64_u8): Likewise.
	(__arm_vreinterpretq_s8_s16): Likewise.
	(__arm_vreinterpretq_s8_s32): Likewise.
	(__arm_vreinterpretq_s8_s64): Likewise.
	(__arm_vreinterpretq_s8_u16): Likewise.
	(__arm_vreinterpretq_s8_u32): Likewise.
	(__arm_vreinterpretq_s8_u64): Likewise.
	(__arm_vreinterpretq_s8_u8): Likewise.
	(__arm_vreinterpretq_u16_s16): Likewise.
	(__arm_vreinterpretq_u16_s32): Likewise.
	(__arm_vreinterpretq_u16_s64): Likewise.
	(__arm_vreinterpretq_u16_s8): Likewise.
	(__arm_vreinterpretq_u16_u32): Likewise.
	(__arm_vreinterpretq_u16_u64): Likewise.
	(__arm_vreinterpretq_u16_u8): Likewise.
	(__arm_vreinterpretq_u32_s16): Likewise.
	(__arm_vreinterpretq_u32_s32): Likewise.
	(__arm_vreinterpretq_u32_s64): Likewise.
	(__arm_vreinterpretq_u32_s8): Likewise.
	(__arm_vreinterpretq_u32_u16): Likewise.
	(__arm_vreinterpretq_u32_u64): Likewise.
	(__arm_vreinterpretq_u32_u8): Likewise.
	(__arm_vreinterpretq_u64_s16): Likewise.
	(__arm_vreinterpretq_u64_s32): Likewise.
	(__arm_vreinterpretq_u64_s64): Likewise.
	(__arm_vreinterpretq_u64_s8): Likewise.
	(__arm_vreinterpretq_u64_u16): Likewise.
	(__arm_vreinterpretq_u64_u32): Likewise.
	(__arm_vreinterpretq_u64_u8): Likewise.
	(__arm_vreinterpretq_u8_s16): Likewise.
	(__arm_vreinterpretq_u8_s32): Likewise.
	(__arm_vreinterpretq_u8_s64): Likewise.
	(__arm_vreinterpretq_u8_s8): Likewise.
	(__arm_vreinterpretq_u8_u16): Likewise.
	(__arm_vreinterpretq_u8_u32): Likewise.
	(__arm_vreinterpretq_u8_u64): Likewise.
	(__arm_vuninitializedq_f16): Likewise.
	(__arm_vuninitializedq_f32): Likewise.
	(__arm_vreinterpretq_s32_f16): Likewise.
	(__arm_vreinterpretq_s32_f32): Likewise.
	(__arm_vreinterpretq_s16_f16): Likewise.
	(__arm_vreinterpretq_s16_f32): Likewise.
	(__arm_vreinterpretq_s64_f16): Likewise.
	(__arm_vreinterpretq_s64_f32): Likewise.
	(__arm_vreinterpretq_s8_f16): Likewise.
	(__arm_vreinterpretq_s8_f32): Likewise.
	(__arm_vreinterpretq_u16_f16): Likewise.
	(__arm_vreinterpretq_u16_f32): Likewise.
	(__arm_vreinterpretq_u32_f16): Likewise.
	(__arm_vreinterpretq_u32_f32): Likewise.
	(__arm_vreinterpretq_u64_f16): Likewise.
	(__arm_vreinterpretq_u64_f32): Likewise.
	(__arm_vreinterpretq_u8_f16): Likewise.
	(__arm_vreinterpretq_u8_f32): Likewise.
	(__arm_vreinterpretq_f16_f32): Likewise.
	(__arm_vreinterpretq_f16_s16): Likewise.
	(__arm_vreinterpretq_f16_s32): Likewise.
	(__arm_vreinterpretq_f16_s64): Likewise.
	(__arm_vreinterpretq_f16_s8): Likewise.
	(__arm_vreinterpretq_f16_u16): Likewise.
	(__arm_vreinterpretq_f16_u32): Likewise.
	(__arm_vreinterpretq_f16_u64): Likewise.
	(__arm_vreinterpretq_f16_u8): Likewise.
	(__arm_vreinterpretq_f32_f16): Likewise.
	(__arm_vreinterpretq_f32_s16): Likewise.
	(__arm_vreinterpretq_f32_s32): Likewise.
	(__arm_vreinterpretq_f32_s64): Likewise.
	(__arm_vreinterpretq_f32_s8): Likewise.
	(__arm_vreinterpretq_f32_u16): Likewise.
	(__arm_vreinterpretq_f32_u32): Likewise.
	(__arm_vreinterpretq_f32_u64): Likewise.
	(__arm_vreinterpretq_f32_u8): Likewise.
	(vuninitializedq): Define polymorphic variant.
	(vreinterpretq_f16): Likewise.
	(vreinterpretq_f32): Likewise.
	(vreinterpretq_s16): Likewise.
	(vreinterpretq_s32): Likewise.
	(vreinterpretq_s64): Likewise.
	(vreinterpretq_s8): Likewise.
	(vreinterpretq_u16): Likewise.
	(vreinterpretq_u32): Likewise.
	(vreinterpretq_u64): Likewise.
	(vreinterpretq_u8): Likewise.

gcc/testsuite/ChangeLog:

2020-03-19  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>

	* gcc.target/arm/mve/intrinsics/vuninitializedq_float.c: New test.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_int.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_float.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_int.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c: Likewise.



###############     Attachment also inlined for ease of reply    ###############

Comments

Kyrylo Tkachov March 20, 2020, 11:51 a.m. UTC | #1
Hi Srinath,

> -----Original Message-----
> From: Srinath Parvathaneni <Srinath.Parvathaneni@arm.com>
> Sent: 19 March 2020 17:59
> To: gcc-patches@gcc.gnu.org
> Cc: Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>
> Subject: [PATCH v2][ARM][GCC][7x]: MVE vreinterpretq and vuninitializedq
> intrinsics.
> 
> Hello Kyrill,
> 
> This patch addresses all the comments in patch version v2.
> (version v2) https://gcc.gnu.org/pipermail/gcc-patches/2019-
> November/534351.html
> 
> ####
> 
> Hello,
> 
> This patch supports following MVE ACLE intrinsics.
> 
> vreinterpretq_s16_s32, vreinterpretq_s16_s64, vreinterpretq_s16_s8,
> vreinterpretq_s16_u16,
> vreinterpretq_s16_u32, vreinterpretq_s16_u64, vreinterpretq_s16_u8,
> vreinterpretq_s32_s16,
> vreinterpretq_s32_s64, vreinterpretq_s32_s8, vreinterpretq_s32_u16,
> vreinterpretq_s32_u32,
> vreinterpretq_s32_u64, vreinterpretq_s32_u8, vreinterpretq_s64_s16,
> vreinterpretq_s64_s32,
> vreinterpretq_s64_s8, vreinterpretq_s64_u16, vreinterpretq_s64_u32,
> vreinterpretq_s64_u64,
> vreinterpretq_s64_u8, vreinterpretq_s8_s16, vreinterpretq_s8_s32,
> vreinterpretq_s8_s64,
> vreinterpretq_s8_u16, vreinterpretq_s8_u32, vreinterpretq_s8_u64,
> vreinterpretq_s8_u8,
> vreinterpretq_u16_s16, vreinterpretq_u16_s32, vreinterpretq_u16_s64,
> vreinterpretq_u16_s8,
> vreinterpretq_u16_u32, vreinterpretq_u16_u64, vreinterpretq_u16_u8,
> vreinterpretq_u32_s16,
> vreinterpretq_u32_s32, vreinterpretq_u32_s64, vreinterpretq_u32_s8,
> vreinterpretq_u32_u16,
> vreinterpretq_u32_u64, vreinterpretq_u32_u8, vreinterpretq_u64_s16,
> vreinterpretq_u64_s32,
> vreinterpretq_u64_s64, vreinterpretq_u64_s8, vreinterpretq_u64_u16,
> vreinterpretq_u64_u32,
> vreinterpretq_u64_u8, vreinterpretq_u8_s16, vreinterpretq_u8_s32,
> vreinterpretq_u8_s64,
> vreinterpretq_u8_s8, vreinterpretq_u8_u16, vreinterpretq_u8_u32,
> vreinterpretq_u8_u64,
> vreinterpretq_s32_f16, vreinterpretq_s32_f32, vreinterpretq_u16_f16,
> vreinterpretq_u16_f32,
> vreinterpretq_u32_f16, vreinterpretq_u32_f32, vreinterpretq_u64_f16,
> vreinterpretq_u64_f32,
> vreinterpretq_u8_f16, vreinterpretq_u8_f32, vreinterpretq_f16_f32,
> vreinterpretq_f16_s16,
> vreinterpretq_f16_s32, vreinterpretq_f16_s64, vreinterpretq_f16_s8,
> vreinterpretq_f16_u16,
> vreinterpretq_f16_u32, vreinterpretq_f16_u64, vreinterpretq_f16_u8,
> vreinterpretq_f32_f16,
> vreinterpretq_f32_s16, vreinterpretq_f32_s32, vreinterpretq_f32_s64,
> vreinterpretq_f32_s8,
> vreinterpretq_f32_u16, vreinterpretq_f32_u32, vreinterpretq_f32_u64,
> vreinterpretq_f32_u8,
> vreinterpretq_s16_f16, vreinterpretq_s16_f32, vreinterpretq_s64_f16,
> vreinterpretq_s64_f32,
> vreinterpretq_s8_f16, vreinterpretq_s8_f32, vuninitializedq_u8,
> vuninitializedq_u16,
> vuninitializedq_u32, vuninitializedq_u64, vuninitializedq_s8,
> vuninitializedq_s16,
> vuninitializedq_s32, vuninitializedq_s64, vuninitializedq_f16,
> vuninitializedq_f32 and
> vuninitializedq.
> 
> Please refer to M-profile Vector Extension (MVE) intrinsics [1]  for more
> details.
> [1] https://developer.arm.com/architectures/instruction-sets/simd-
> isas/helium/mve-intrinsics
> 
> Regression tested on arm-none-eabi and found no regressions.
> 
> Ok for trunk?

Thanks, I've pushed this patch to master.
Kyrill

> 
> Thanks,
> Srinath.
> 
> gcc/ChangeLog:
> 
> 2020-03-19  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
> 
> 	* config/arm/arm_mve.h (vreinterpretq_s16_s32): Define macro.
> 	(vreinterpretq_s16_s64): Likewise.
> 	(vreinterpretq_s16_s8): Likewise.
> 	(vreinterpretq_s16_u16): Likewise.
> 	(vreinterpretq_s16_u32): Likewise.
> 	(vreinterpretq_s16_u64): Likewise.
> 	(vreinterpretq_s16_u8): Likewise.
> 	(vreinterpretq_s32_s16): Likewise.
> 	(vreinterpretq_s32_s64): Likewise.
> 	(vreinterpretq_s32_s8): Likewise.
> 	(vreinterpretq_s32_u16): Likewise.
> 	(vreinterpretq_s32_u32): Likewise.
> 	(vreinterpretq_s32_u64): Likewise.
> 	(vreinterpretq_s32_u8): Likewise.
> 	(vreinterpretq_s64_s16): Likewise.
> 	(vreinterpretq_s64_s32): Likewise.
> 	(vreinterpretq_s64_s8): Likewise.
> 	(vreinterpretq_s64_u16): Likewise.
> 	(vreinterpretq_s64_u32): Likewise.
> 	(vreinterpretq_s64_u64): Likewise.
> 	(vreinterpretq_s64_u8): Likewise.
> 	(vreinterpretq_s8_s16): Likewise.
> 	(vreinterpretq_s8_s32): Likewise.
> 	(vreinterpretq_s8_s64): Likewise.
> 	(vreinterpretq_s8_u16): Likewise.
> 	(vreinterpretq_s8_u32): Likewise.
> 	(vreinterpretq_s8_u64): Likewise.
> 	(vreinterpretq_s8_u8): Likewise.
> 	(vreinterpretq_u16_s16): Likewise.
> 	(vreinterpretq_u16_s32): Likewise.
> 	(vreinterpretq_u16_s64): Likewise.
> 	(vreinterpretq_u16_s8): Likewise.
> 	(vreinterpretq_u16_u32): Likewise.
> 	(vreinterpretq_u16_u64): Likewise.
> 	(vreinterpretq_u16_u8): Likewise.
> 	(vreinterpretq_u32_s16): Likewise.
> 	(vreinterpretq_u32_s32): Likewise.
> 	(vreinterpretq_u32_s64): Likewise.
> 	(vreinterpretq_u32_s8): Likewise.
> 	(vreinterpretq_u32_u16): Likewise.
> 	(vreinterpretq_u32_u64): Likewise.
> 	(vreinterpretq_u32_u8): Likewise.
> 	(vreinterpretq_u64_s16): Likewise.
> 	(vreinterpretq_u64_s32): Likewise.
> 	(vreinterpretq_u64_s64): Likewise.
> 	(vreinterpretq_u64_s8): Likewise.
> 	(vreinterpretq_u64_u16): Likewise.
> 	(vreinterpretq_u64_u32): Likewise.
> 	(vreinterpretq_u64_u8): Likewise.
> 	(vreinterpretq_u8_s16): Likewise.
> 	(vreinterpretq_u8_s32): Likewise.
> 	(vreinterpretq_u8_s64): Likewise.
> 	(vreinterpretq_u8_s8): Likewise.
> 	(vreinterpretq_u8_u16): Likewise.
> 	(vreinterpretq_u8_u32): Likewise.
> 	(vreinterpretq_u8_u64): Likewise.
> 	(vreinterpretq_s32_f16): Likewise.
> 	(vreinterpretq_s32_f32): Likewise.
> 	(vreinterpretq_u16_f16): Likewise.
> 	(vreinterpretq_u16_f32): Likewise.
> 	(vreinterpretq_u32_f16): Likewise.
> 	(vreinterpretq_u32_f32): Likewise.
> 	(vreinterpretq_u64_f16): Likewise.
> 	(vreinterpretq_u64_f32): Likewise.
> 	(vreinterpretq_u8_f16): Likewise.
> 	(vreinterpretq_u8_f32): Likewise.
> 	(vreinterpretq_f16_f32): Likewise.
> 	(vreinterpretq_f16_s16): Likewise.
> 	(vreinterpretq_f16_s32): Likewise.
> 	(vreinterpretq_f16_s64): Likewise.
> 	(vreinterpretq_f16_s8): Likewise.
> 	(vreinterpretq_f16_u16): Likewise.
> 	(vreinterpretq_f16_u32): Likewise.
> 	(vreinterpretq_f16_u64): Likewise.
> 	(vreinterpretq_f16_u8): Likewise.
> 	(vreinterpretq_f32_f16): Likewise.
> 	(vreinterpretq_f32_s16): Likewise.
> 	(vreinterpretq_f32_s32): Likewise.
> 	(vreinterpretq_f32_s64): Likewise.
> 	(vreinterpretq_f32_s8): Likewise.
> 	(vreinterpretq_f32_u16): Likewise.
> 	(vreinterpretq_f32_u32): Likewise.
> 	(vreinterpretq_f32_u64): Likewise.
> 	(vreinterpretq_f32_u8): Likewise.
> 	(vreinterpretq_s16_f16): Likewise.
> 	(vreinterpretq_s16_f32): Likewise.
> 	(vreinterpretq_s64_f16): Likewise.
> 	(vreinterpretq_s64_f32): Likewise.
> 	(vreinterpretq_s8_f16): Likewise.
> 	(vreinterpretq_s8_f32): Likewise.
> 	(vuninitializedq_u8): Likewise.
> 	(vuninitializedq_u16): Likewise.
> 	(vuninitializedq_u32): Likewise.
> 	(vuninitializedq_u64): Likewise.
> 	(vuninitializedq_s8): Likewise.
> 	(vuninitializedq_s16): Likewise.
> 	(vuninitializedq_s32): Likewise.
> 	(vuninitializedq_s64): Likewise.
> 	(vuninitializedq_f16): Likewise.
> 	(vuninitializedq_f32): Likewise.
> 	(__arm_vuninitializedq_u8): Define intrinsic.
> 	(__arm_vuninitializedq_u16): Likewise.
> 	(__arm_vuninitializedq_u32): Likewise.
> 	(__arm_vuninitializedq_u64): Likewise.
> 	(__arm_vuninitializedq_s8): Likewise.
> 	(__arm_vuninitializedq_s16): Likewise.
> 	(__arm_vuninitializedq_s32): Likewise.
> 	(__arm_vuninitializedq_s64): Likewise.
> 	(__arm_vreinterpretq_s16_s32): Likewise.
> 	(__arm_vreinterpretq_s16_s64): Likewise.
> 	(__arm_vreinterpretq_s16_s8): Likewise.
> 	(__arm_vreinterpretq_s16_u16): Likewise.
> 	(__arm_vreinterpretq_s16_u32): Likewise.
> 	(__arm_vreinterpretq_s16_u64): Likewise.
> 	(__arm_vreinterpretq_s16_u8): Likewise.
> 	(__arm_vreinterpretq_s32_s16): Likewise.
> 	(__arm_vreinterpretq_s32_s64): Likewise.
> 	(__arm_vreinterpretq_s32_s8): Likewise.
> 	(__arm_vreinterpretq_s32_u16): Likewise.
> 	(__arm_vreinterpretq_s32_u32): Likewise.
> 	(__arm_vreinterpretq_s32_u64): Likewise.
> 	(__arm_vreinterpretq_s32_u8): Likewise.
> 	(__arm_vreinterpretq_s64_s16): Likewise.
> 	(__arm_vreinterpretq_s64_s32): Likewise.
> 	(__arm_vreinterpretq_s64_s8): Likewise.
> 	(__arm_vreinterpretq_s64_u16): Likewise.
> 	(__arm_vreinterpretq_s64_u32): Likewise.
> 	(__arm_vreinterpretq_s64_u64): Likewise.
> 	(__arm_vreinterpretq_s64_u8): Likewise.
> 	(__arm_vreinterpretq_s8_s16): Likewise.
> 	(__arm_vreinterpretq_s8_s32): Likewise.
> 	(__arm_vreinterpretq_s8_s64): Likewise.
> 	(__arm_vreinterpretq_s8_u16): Likewise.
> 	(__arm_vreinterpretq_s8_u32): Likewise.
> 	(__arm_vreinterpretq_s8_u64): Likewise.
> 	(__arm_vreinterpretq_s8_u8): Likewise.
> 	(__arm_vreinterpretq_u16_s16): Likewise.
> 	(__arm_vreinterpretq_u16_s32): Likewise.
> 	(__arm_vreinterpretq_u16_s64): Likewise.
> 	(__arm_vreinterpretq_u16_s8): Likewise.
> 	(__arm_vreinterpretq_u16_u32): Likewise.
> 	(__arm_vreinterpretq_u16_u64): Likewise.
> 	(__arm_vreinterpretq_u16_u8): Likewise.
> 	(__arm_vreinterpretq_u32_s16): Likewise.
> 	(__arm_vreinterpretq_u32_s32): Likewise.
> 	(__arm_vreinterpretq_u32_s64): Likewise.
> 	(__arm_vreinterpretq_u32_s8): Likewise.
> 	(__arm_vreinterpretq_u32_u16): Likewise.
> 	(__arm_vreinterpretq_u32_u64): Likewise.
> 	(__arm_vreinterpretq_u32_u8): Likewise.
> 	(__arm_vreinterpretq_u64_s16): Likewise.
> 	(__arm_vreinterpretq_u64_s32): Likewise.
> 	(__arm_vreinterpretq_u64_s64): Likewise.
> 	(__arm_vreinterpretq_u64_s8): Likewise.
> 	(__arm_vreinterpretq_u64_u16): Likewise.
> 	(__arm_vreinterpretq_u64_u32): Likewise.
> 	(__arm_vreinterpretq_u64_u8): Likewise.
> 	(__arm_vreinterpretq_u8_s16): Likewise.
> 	(__arm_vreinterpretq_u8_s32): Likewise.
> 	(__arm_vreinterpretq_u8_s64): Likewise.
> 	(__arm_vreinterpretq_u8_s8): Likewise.
> 	(__arm_vreinterpretq_u8_u16): Likewise.
> 	(__arm_vreinterpretq_u8_u32): Likewise.
> 	(__arm_vreinterpretq_u8_u64): Likewise.
> 	(__arm_vuninitializedq_f16): Likewise.
> 	(__arm_vuninitializedq_f32): Likewise.
> 	(__arm_vreinterpretq_s32_f16): Likewise.
> 	(__arm_vreinterpretq_s32_f32): Likewise.
> 	(__arm_vreinterpretq_s16_f16): Likewise.
> 	(__arm_vreinterpretq_s16_f32): Likewise.
> 	(__arm_vreinterpretq_s64_f16): Likewise.
> 	(__arm_vreinterpretq_s64_f32): Likewise.
> 	(__arm_vreinterpretq_s8_f16): Likewise.
> 	(__arm_vreinterpretq_s8_f32): Likewise.
> 	(__arm_vreinterpretq_u16_f16): Likewise.
> 	(__arm_vreinterpretq_u16_f32): Likewise.
> 	(__arm_vreinterpretq_u32_f16): Likewise.
> 	(__arm_vreinterpretq_u32_f32): Likewise.
> 	(__arm_vreinterpretq_u64_f16): Likewise.
> 	(__arm_vreinterpretq_u64_f32): Likewise.
> 	(__arm_vreinterpretq_u8_f16): Likewise.
> 	(__arm_vreinterpretq_u8_f32): Likewise.
> 	(__arm_vreinterpretq_f16_f32): Likewise.
> 	(__arm_vreinterpretq_f16_s16): Likewise.
> 	(__arm_vreinterpretq_f16_s32): Likewise.
> 	(__arm_vreinterpretq_f16_s64): Likewise.
> 	(__arm_vreinterpretq_f16_s8): Likewise.
> 	(__arm_vreinterpretq_f16_u16): Likewise.
> 	(__arm_vreinterpretq_f16_u32): Likewise.
> 	(__arm_vreinterpretq_f16_u64): Likewise.
> 	(__arm_vreinterpretq_f16_u8): Likewise.
> 	(__arm_vreinterpretq_f32_f16): Likewise.
> 	(__arm_vreinterpretq_f32_s16): Likewise.
> 	(__arm_vreinterpretq_f32_s32): Likewise.
> 	(__arm_vreinterpretq_f32_s64): Likewise.
> 	(__arm_vreinterpretq_f32_s8): Likewise.
> 	(__arm_vreinterpretq_f32_u16): Likewise.
> 	(__arm_vreinterpretq_f32_u32): Likewise.
> 	(__arm_vreinterpretq_f32_u64): Likewise.
> 	(__arm_vreinterpretq_f32_u8): Likewise.
> 	(vuninitializedq): Define polymorphic variant.
> 	(vreinterpretq_f16): Likewise.
> 	(vreinterpretq_f32): Likewise.
> 	(vreinterpretq_s16): Likewise.
> 	(vreinterpretq_s32): Likewise.
> 	(vreinterpretq_s64): Likewise.
> 	(vreinterpretq_s8): Likewise.
> 	(vreinterpretq_u16): Likewise.
> 	(vreinterpretq_u32): Likewise.
> 	(vreinterpretq_u64): Likewise.
> 	(vreinterpretq_u8): Likewise.
> 
> gcc/testsuite/ChangeLog:
> 
> 2020-03-19  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
> 
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_float.c: New test.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_int.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_float.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_int.c: Likewise.
> 	* gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c: Likewise.
> 
> 
> 
> ###############     Attachment also inlined for ease of reply
> ###############
> 
> 
> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
> index
> 55c256910bb7f4c616ea592be699f7f4fc3f17f7..916565c9b55bae77869669fd1
> e8f8b7f4a37b52e 100644
> --- a/gcc/config/arm/arm_mve.h
> +++ b/gcc/config/arm/arm_mve.h
> @@ -1906,6 +1906,106 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
>  #define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
>  #define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
>  #define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
> +#define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
> +#define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
> +#define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
> +#define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
> +#define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
> +#define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
> +#define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
> +#define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
> +#define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
> +#define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
> +#define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
> +#define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
> +#define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
> +#define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
> +#define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
> +#define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
> +#define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
> +#define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
> +#define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
> +#define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
> +#define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
> +#define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
> +#define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
> +#define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
> +#define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
> +#define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
> +#define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
> +#define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
> +#define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
> +#define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
> +#define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
> +#define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
> +#define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
> +#define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
> +#define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
> +#define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
> +#define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
> +#define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
> +#define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
> +#define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
> +#define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
> +#define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
> +#define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
> +#define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
> +#define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
> +#define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
> +#define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
> +#define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
> +#define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
> +#define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
> +#define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
> +#define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
> +#define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
> +#define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
> +#define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
> +#define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
> +#define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
> +#define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
> +#define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
> +#define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
> +#define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
> +#define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
> +#define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
> +#define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
> +#define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
> +#define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
> +#define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
> +#define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
> +#define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
> +#define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
> +#define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
> +#define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
> +#define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
> +#define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
> +#define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
> +#define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
> +#define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
> +#define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
> +#define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
> +#define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
> +#define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
> +#define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
> +#define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
> +#define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
> +#define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
> +#define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
> +#define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
> +#define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
> +#define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
> +#define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
> +#define vuninitializedq_u8() __arm_vuninitializedq_u8()
> +#define vuninitializedq_u16() __arm_vuninitializedq_u16()
> +#define vuninitializedq_u32() __arm_vuninitializedq_u32()
> +#define vuninitializedq_u64() __arm_vuninitializedq_u64()
> +#define vuninitializedq_s8() __arm_vuninitializedq_s8()
> +#define vuninitializedq_s16() __arm_vuninitializedq_s16()
> +#define vuninitializedq_s32() __arm_vuninitializedq_s32()
> +#define vuninitializedq_s64() __arm_vuninitializedq_s64()
> +#define vuninitializedq_f16() __arm_vuninitializedq_f16()
> +#define vuninitializedq_f32() __arm_vuninitializedq_f32()
>  #endif
> 
>  __extension__ extern __inline void
> @@ -12391,6 +12491,471 @@ __arm_vaddq_u32 (uint32x4_t __a,
> uint32x4_t __b)
>    return __a + __b;
>  }
> 
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_u8 (void)
> +{
> +  uint8x16_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_u16 (void)
> +{
> +  uint16x8_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_u32 (void)
> +{
> +  uint32x4_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_u64 (void)
> +{
> +  uint64x2_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_s8 (void)
> +{
> +  int8x16_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_s16 (void)
> +{
> +  int16x8_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_s32 (void)
> +{
> +  int32x4_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_s64 (void)
> +{
> +  int64x2_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_s32 (int32x4_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_s64 (int64x2_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_s8 (int8x16_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_u16 (uint16x8_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_u32 (uint32x4_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_u64 (uint64x2_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_u8 (uint8x16_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_s16 (int16x8_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_s64 (int64x2_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_s8 (int8x16_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_u16 (uint16x8_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_u32 (uint32x4_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_u64 (uint64x2_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_u8 (uint8x16_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_s16 (int16x8_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_s32 (int32x4_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_s8 (int8x16_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_u16 (uint16x8_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_u32 (uint32x4_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_u64 (uint64x2_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_u8 (uint8x16_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_s16 (int16x8_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_s32 (int32x4_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_s64 (int64x2_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_u16 (uint16x8_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_u32 (uint32x4_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_u64 (uint64x2_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_u8 (uint8x16_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_s16 (int16x8_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_s32 (int32x4_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_s64 (int64x2_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_s8 (int8x16_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_u32 (uint32x4_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_u64 (uint64x2_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_u8 (uint8x16_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_s16 (int16x8_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_s32 (int32x4_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_s64 (int64x2_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_s8 (int8x16_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_u16 (uint16x8_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_u64 (uint64x2_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_u8 (uint8x16_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_s16 (int16x8_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_s32 (int32x4_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_s64 (int64x2_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_s8 (int8x16_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_u16 (uint16x8_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_u32 (uint32x4_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_u8 (uint8x16_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_s16 (int16x8_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_s32 (int32x4_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_s64 (int64x2_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_s8 (int8x16_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_u16 (uint16x8_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_u32 (uint32x4_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_u64 (uint64x2_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
>  #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
> 
>  __extension__ extern __inline void
> @@ -14771,6 +15336,262 @@ __arm_vaddq_f32 (float32x4_t __a,
> float32x4_t __b)
>    return __a + __b;
>  }
> 
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_f16 (void)
> +{
> +  float16x8_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vuninitializedq_f32 (void)
> +{
> +  float32x4_t __uninit;
> +  __asm__ ("": "=w" (__uninit));
> +  return __uninit;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_f16 (float16x8_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s32_f32 (float32x4_t __a)
> +{
> +  return (int32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_f16 (float16x8_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s16_f32 (float32x4_t __a)
> +{
> +  return (int16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_f16 (float16x8_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s64_f32 (float32x4_t __a)
> +{
> +  return (int64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_f16 (float16x8_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline int8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_s8_f32 (float32x4_t __a)
> +{
> +  return (int8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_f16 (float16x8_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u16_f32 (float32x4_t __a)
> +{
> +  return (uint16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_f16 (float16x8_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u32_f32 (float32x4_t __a)
> +{
> +  return (uint32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_f16 (float16x8_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint64x2_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u64_f32 (float32x4_t __a)
> +{
> +  return (uint64x2_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_f16 (float16x8_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline uint8x16_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_u8_f32 (float32x4_t __a)
> +{
> +  return (uint8x16_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_f32 (float32x4_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_s16 (int16x8_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_s32 (int32x4_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_s64 (int64x2_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_s8 (int8x16_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_u16 (uint16x8_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_u32 (uint32x4_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_u64 (uint64x2_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float16x8_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f16_u8 (uint8x16_t __a)
> +{
> +  return (float16x8_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_f16 (float16x8_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_s16 (int16x8_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_s32 (int32x4_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_s64 (int64x2_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_s8 (int8x16_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_u16 (uint16x8_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_u32 (uint32x4_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_u64 (uint64x2_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
> +__extension__ extern __inline float32x4_t
> +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> +__arm_vreinterpretq_f32_u8 (uint8x16_t __a)
> +{
> +  return (float32x4_t)  __a;
> +}
> +
>  #endif
> 
>  enum {
> @@ -17543,6 +18364,150 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]:
> __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0,
> uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
>    int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]:
> __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0,
> float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
> 
> +#define vuninitializedq(p0) __arm_vuninitializedq(p0)
> +#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
> +  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> +  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
> +  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
> +  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
> +  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
> +  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
> +  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
> +  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
> +  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
> +  int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
> +  int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
> +
> +#define vreinterpretq_f16(p0) __arm_vreinterpretq_f16(p0)
> +#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
> +  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> +  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8
> (__ARM_mve_coerce(__p0, int8x16_t)), \
> +  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16
> (__ARM_mve_coerce(__p0, int16x8_t)), \
> +  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32
> (__ARM_mve_coerce(__p0, int32x4_t)), \
> +  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64
> (__ARM_mve_coerce(__p0, int64x2_t)), \
> +  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8
> (__ARM_mve_coerce(__p0, uint8x16_t)), \
> +  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16
> (__ARM_mve_coerce(__p0, uint16x8_t)), \
> +  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32
> (__ARM_mve_coerce(__p0, uint32x4_t)), \
> +  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64
> (__ARM_mve_coerce(__p0, uint64x2_t)), \
> +  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32
> (__ARM_mve_coerce(__p0, float32x4_t)));})
> +
> +#define vreinterpretq_f32(p0) __arm_vreinterpretq_f32(p0)
> +#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
> +  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> +  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8
> (__ARM_mve_coerce(__p0, int8x16_t)), \
> +  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16
> (__ARM_mve_coerce(__p0, int16x8_t)), \
> +  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32
> (__ARM_mve_coerce(__p0, int32x4_t)), \
> +  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64
> (__ARM_mve_coerce(__p0, int64x2_t)), \
> +  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8
> (__ARM_mve_coerce(__p0, uint8x16_t)), \
> +  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16
> (__ARM_mve_coerce(__p0, uint16x8_t)), \
> +  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32
> (__ARM_mve_coerce(__p0, uint32x4_t)), \
> +  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64
> (__ARM_mve_coerce(__p0, uint64x2_t)), \
> +  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16
> (__ARM_mve_coerce(__p0, float16x8_t)));})
> +
> +#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
> +#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
> +  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> +  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16
> (__ARM_mve_coerce(__p0, float16x8_t)), \
> +  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8
> (__ARM_mve_coerce(__p0, int8x16_t)), \
> +  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32
> (__ARM_mve_coerce(__p0, int32x4_t)), \
> +  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64
> (__ARM_mve_coerce(__p0, int64x2_t)), \
> +  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8
> (__ARM_mve_coerce(__p0, uint8x16_t)), \
> +  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16
> (__ARM_mve_coerce(__p0, uint16x8_t)), \
> +  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32
> (__ARM_mve_coerce(__p0, uint32x4_t)), \
> +  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64
> (__ARM_mve_coerce(__p0, uint64x2_t)), \
> +  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32
> (__ARM_mve_coerce(__p0, float32x4_t)));})
> +
> +#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
> +#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
> +  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> +  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16
> (__ARM_mve_coerce(__p0, float16x8_t)), \
> +  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16
> (__ARM_mve_coerce(__p0, int16x8_t)), \
> +  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8
> (__ARM_mve_coerce(__p0, int8x16_t)), \
> +  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64
> (__ARM_mve_coerce(__p0, int64x2_t)), \
> +  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8
> (__ARM_mve_coerce(__p0, uint8x16_t)), \
> +  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16
> (__ARM_mve_coerce(__p0, uint16x8_t)), \
> +  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32
> (__ARM_mve_coerce(__p0, uint32x4_t)), \
> +  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64
> (__ARM_mve_coerce(__p0, uint64x2_t)), \
> +  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32
> (__ARM_mve_coerce(__p0, float32x4_t)));})
> +
> +#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
> +#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
> +  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
> +  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16
> (__ARM_mve_coerce(__p0, float16x8_t)), \
> +  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16
> (__ARM_mve_coerce(__p0, int16x8_t)), \
> +  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32
> (__ARM_mve_coerce(__p0, int32x4_t)), \
> +  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8
> (__ARM_mve_coerce(__p0, int8x16_t)), \
> +  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8
> (__ARM_mve_coerce(__p0, uint8x16_t)), \
> +  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16
> (__ARM_mve_coerce(__p0, uint16x8_t)), \
> +  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32
> (__ARM_mve_coerce(__p0, uint32x4_t)), \
> +  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64
> (__ARM_mve_coerce(__p0, uint64x2_t)), \
> +  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32
> (__ARM_mve_coerce(__p0, float32x4_t)));})
> +
/* Polymorphic vreinterpretq wrappers for the MVE floating-point
   configuration.  Each wrapper dispatches on the vector type of its
   argument with C11 _Generic and forwards to the matching
   type-suffixed __arm_vreinterpretq_<dst>_<src> intrinsic.  There is
   deliberately no <dst>-to-<dst> association: same-type reinterpret
   is not part of the polymorphic contract.  */

#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
> +
>  #else /* MVE Integer.  */
> 
>  #define vst4q(p0,p1) __arm_vst4q(p0,p1)
> @@ -19925,6 +20890,106 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]:
> __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0,
> int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
>    int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]:
> __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0,
> uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> 
/* Polymorphic variants for the integer-only MVE configuration.
   __arm_vuninitializedq maps the vector type of its argument to the
   matching no-argument __arm_vuninitializedq_<suffix> intrinsic; the
   __arm_vreinterpretq_<dst> wrappers dispatch on the source vector
   type with C11 _Generic and forward to the type-suffixed
   __arm_vreinterpretq_<dst>_<src> intrinsic.  No <dst>-to-<dst>
   association is provided, and no float associations exist in this
   configuration.  */

#define vuninitializedq(p0) __arm_vuninitializedq(p0)
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})

#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
> +
>  #endif /* MVE Integer.  */
> 
>  #define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1)
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..bc40440296522a96a7c6fb0
> a7732c735ea37b266
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int8x16_t value1;
> +int64x2_t value2;
> +int32x4_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +int16x8_t value8;
> +float32x4_t value9;
> +
> +float16x8_t
> +foo ()
> +{
> +  float16x8_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_f16 (vreinterpretq_f16_s8 (value1), vreinterpretq_f16_s64
> (value2));
> +  r2 = vaddq_f16 (r1, vreinterpretq_f16_s32 (value3));
> +  r3 = vaddq_f16 (r2, vreinterpretq_f16_u8 (value4));
> +  r4 = vaddq_f16 (r3, vreinterpretq_f16_u16 (value5));
> +  r5 = vaddq_f16 (r4, vreinterpretq_f16_u64 (value6));
> +  r6 = vaddq_f16 (r5, vreinterpretq_f16_u32 (value7));
> +  r7 = vaddq_f16 (r6, vreinterpretq_f16_s16 (value8));
> +  return vaddq_f16 (r7, vreinterpretq_f16_f32 (value9));
> +}
> +
> +float16x8_t
> +foo1 ()
> +{
> +  float16x8_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_f16 (vreinterpretq_f16 (value1), vreinterpretq_f16 (value2));
> +  r2 = vaddq_f16 (r1, vreinterpretq_f16 (value3));
> +  r3 = vaddq_f16 (r2, vreinterpretq_f16 (value4));
> +  r4 = vaddq_f16 (r3, vreinterpretq_f16 (value5));
> +  r5 = vaddq_f16 (r4, vreinterpretq_f16 (value6));
> +  r6 = vaddq_f16 (r5, vreinterpretq_f16 (value7));
> +  r7 = vaddq_f16 (r6, vreinterpretq_f16 (value8));
> +  return vaddq_f16 (r7, vreinterpretq_f16 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.f16" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..d30818b0f3d896f7e9f3022
> 7b8578f90d2731209
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int64x2_t value2;
> +int8x16_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +int32x4_t value9;
> +
> +float32x4_t
> +foo ()
> +{
> +  float32x4_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_f32 (vreinterpretq_f32_s16 (value1), vreinterpretq_f32_s64
> (value2));
> +  r2 = vaddq_f32 (r1, vreinterpretq_f32_s8 (value3));
> +  r3 = vaddq_f32 (r2, vreinterpretq_f32_u8 (value4));
> +  r4 = vaddq_f32 (r3, vreinterpretq_f32_u16 (value5));
> +  r5 = vaddq_f32 (r4, vreinterpretq_f32_u64 (value6));
> +  r6 = vaddq_f32 (r5, vreinterpretq_f32_u32 (value7));
> +  r7 = vaddq_f32 (r6, vreinterpretq_f32_f16 (value8));
> +  return vaddq_f32 (r7, vreinterpretq_f32_s32 (value9));
> +}
> +
> +float32x4_t
> +foo1 ()
> +{
> +  float32x4_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_f32 (vreinterpretq_f32 (value1), vreinterpretq_f32 (value2));
> +  r2 = vaddq_f32 (r1, vreinterpretq_f32 (value3));
> +  r3 = vaddq_f32 (r2, vreinterpretq_f32 (value4));
> +  r4 = vaddq_f32 (r3, vreinterpretq_f32 (value5));
> +  r5 = vaddq_f32 (r4, vreinterpretq_f32 (value6));
> +  r6 = vaddq_f32 (r5, vreinterpretq_f32 (value7));
> +  r7 = vaddq_f32 (r6, vreinterpretq_f32 (value8));
> +  return vaddq_f32 (r7, vreinterpretq_f32 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.f32" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..627a9d8de7c7ac850c3b9f1
> 049057264d908b34d
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int8x16_t value1;
> +int64x2_t value2;
> +int32x4_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +int16x8_t
> +foo ()
> +{
> +  int16x8_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_s16 (vreinterpretq_s16_s8 (value1), vreinterpretq_s16_s64
> (value2));
> +  r2 = vaddq_s16 (r1, vreinterpretq_s16_s32 (value3));
> +  r3 = vaddq_s16 (r2, vreinterpretq_s16_u8 (value4));
> +  r4 = vaddq_s16 (r3, vreinterpretq_s16_u16 (value5));
> +  r5 = vaddq_s16 (r4, vreinterpretq_s16_u64 (value6));
> +  r6 = vaddq_s16 (r5, vreinterpretq_s16_u32 (value7));
> +  r7 = vaddq_s16 (r6, vreinterpretq_s16_f16 (value8));
> +  return vaddq_s16 (r7, vreinterpretq_s16_f32 (value9));
> +}
> +
> +int16x8_t
> +foo1 ()
> +{
> +  int16x8_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_s16 (vreinterpretq_s16 (value1), vreinterpretq_s16 (value2));
> +  r2 = vaddq_s16 (r1, vreinterpretq_s16 (value3));
> +  r3 = vaddq_s16 (r2, vreinterpretq_s16 (value4));
> +  r4 = vaddq_s16 (r3, vreinterpretq_s16 (value5));
> +  r5 = vaddq_s16 (r4, vreinterpretq_s16 (value6));
> +  r6 = vaddq_s16 (r5, vreinterpretq_s16 (value7));
> +  r7 = vaddq_s16 (r6, vreinterpretq_s16 (value8));
> +  return vaddq_s16 (r7, vreinterpretq_s16 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.i16" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..1b905e1095348e1e4376e0
> ef695f4b607a29f8b0
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int64x2_t value2;
> +int8x16_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +int32x4_t
> +foo ()
> +{
> +  int32x4_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_s32 (vreinterpretq_s32_s16 (value1), vreinterpretq_s32_s64
> (value2));
> +  r2 = vaddq_s32 (r1, vreinterpretq_s32_s8 (value3));
> +  r3 = vaddq_s32 (r2, vreinterpretq_s32_u8 (value4));
> +  r4 = vaddq_s32 (r3, vreinterpretq_s32_u16 (value5));
> +  r5 = vaddq_s32 (r4, vreinterpretq_s32_u64 (value6));
> +  r6 = vaddq_s32 (r5, vreinterpretq_s32_u32 (value7));
> +  r7 = vaddq_s32 (r6, vreinterpretq_s32_f16 (value8));
> +  return vaddq_s32 (r7, vreinterpretq_s32_f32 (value9));
> +}
> +
> +int32x4_t
> +foo1 ()
> +{
> +  int32x4_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_s32 (vreinterpretq_s32 (value1), vreinterpretq_s32 (value2));
> +  r2 = vaddq_s32 (r1, vreinterpretq_s32 (value3));
> +  r3 = vaddq_s32 (r2, vreinterpretq_s32 (value4));
> +  r4 = vaddq_s32 (r3, vreinterpretq_s32 (value5));
> +  r5 = vaddq_s32 (r4, vreinterpretq_s32 (value6));
> +  r6 = vaddq_s32 (r5, vreinterpretq_s32 (value7));
> +  r7 = vaddq_s32 (r6, vreinterpretq_s32 (value8));
> +  return vaddq_s32 (r7, vreinterpretq_s32 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.i32" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..3a9fa0b414c202c77890e0cf
> 061102b19fb7e623
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c
> @@ -0,0 +1,46 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int8x16_t value2;
> +int32x4_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +int64x2_t
> +foo (mve_pred16_t __p)
> +{
> +  int64x2_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vpselq_s64 (vreinterpretq_s64_s16 (value1), vreinterpretq_s64_s8
> (value2),
> +		   __p);
> +  r2 = vpselq_s64 (r1, vreinterpretq_s64_s32 (value3), __p);
> +  r3 = vpselq_s64 (r2, vreinterpretq_s64_u8 (value4), __p);
> +  r4 = vpselq_s64 (r3, vreinterpretq_s64_u16 (value5), __p);
> +  r5 = vpselq_s64 (r4, vreinterpretq_s64_u64 (value6), __p);
> +  r6 = vpselq_s64 (r5, vreinterpretq_s64_u32 (value7), __p);
> +  r7 = vpselq_s64 (r6, vreinterpretq_s64_f16 (value8), __p);
> +  return vpselq_s64 (r7, vreinterpretq_s64_f32 (value9), __p);
> +}
> +
> +int64x2_t
> +foo1 (mve_pred16_t __p)
> +{
> +  int64x2_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vpselq_s64 (vreinterpretq_s64 (value1), vreinterpretq_s64 (value2),
> __p);
> +  r2 = vpselq_s64 (r1, vreinterpretq_s64 (value3), __p);
> +  r3 = vpselq_s64 (r2, vreinterpretq_s64 (value4), __p);
> +  r4 = vpselq_s64 (r3, vreinterpretq_s64 (value5), __p);
> +  r5 = vpselq_s64 (r4, vreinterpretq_s64 (value6), __p);
> +  r6 = vpselq_s64 (r5, vreinterpretq_s64 (value7), __p);
> +  r7 = vpselq_s64 (r6, vreinterpretq_s64 (value8), __p);
> +  return vpselq_s64 (r7, vreinterpretq_s64 (value9), __p);
> +}
> +
> +/* { dg-final { scan-assembler-times "vpsel" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..522a935c72f81bad63bdf2f
> 56db135fc4261c766
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int64x2_t value2;
> +int32x4_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +int8x16_t
> +foo ()
> +{
> +  int8x16_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_s8 (vreinterpretq_s8_s16 (value1), vreinterpretq_s8_s64
> (value2));
> +  r2 = vaddq_s8 (r1, vreinterpretq_s8_s32 (value3));
> +  r3 = vaddq_s8 (r2, vreinterpretq_s8_u8 (value4));
> +  r4 = vaddq_s8 (r3, vreinterpretq_s8_u16 (value5));
> +  r5 = vaddq_s8 (r4, vreinterpretq_s8_u64 (value6));
> +  r6 = vaddq_s8 (r5, vreinterpretq_s8_u32 (value7));
> +  r7 = vaddq_s8 (r6, vreinterpretq_s8_f16 (value8));
> +  return vaddq_s8 (r7, vreinterpretq_s8_f32 (value9));
> +}
> +
> +int8x16_t
> +foo1 ()
> +{
> +  int8x16_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_s8 (vreinterpretq_s8 (value1), vreinterpretq_s8 (value2));
> +  r2 = vaddq_s8 (r1, vreinterpretq_s8 (value3));
> +  r3 = vaddq_s8 (r2, vreinterpretq_s8 (value4));
> +  r4 = vaddq_s8 (r3, vreinterpretq_s8 (value5));
> +  r5 = vaddq_s8 (r4, vreinterpretq_s8 (value6));
> +  r6 = vaddq_s8 (r5, vreinterpretq_s8 (value7));
> +  r7 = vaddq_s8 (r6, vreinterpretq_s8 (value8));
> +  return vaddq_s8 (r7, vreinterpretq_s8 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.i8" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..402c0ef61df85be4115f14fd
> f195548ecd15b25f
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int8x16_t value1;
> +int64x2_t value2;
> +int32x4_t value3;
> +uint8x16_t value4;
> +int16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +uint16x8_t
> +foo ()
> +{
> +  uint16x8_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_u16 (vreinterpretq_u16_s8 (value1), vreinterpretq_u16_s64
> (value2));
> +  r2 = vaddq_u16 (r1, vreinterpretq_u16_s32 (value3));
> +  r3 = vaddq_u16 (r2, vreinterpretq_u16_u8 (value4));
> +  r4 = vaddq_u16 (r3, vreinterpretq_u16_s16 (value5));
> +  r5 = vaddq_u16 (r4, vreinterpretq_u16_u64 (value6));
> +  r6 = vaddq_u16 (r5, vreinterpretq_u16_u32 (value7));
> +  r7 = vaddq_u16 (r6, vreinterpretq_u16_f16 (value8));
> +  return vaddq_u16 (r7, vreinterpretq_u16_f32 (value9));
> +}
> +
> +uint16x8_t
> +foo1 ()
> +{
> +  uint16x8_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_u16 (vreinterpretq_u16 (value1), vreinterpretq_u16 (value2));
> +  r2 = vaddq_u16 (r1, vreinterpretq_u16 (value3));
> +  r3 = vaddq_u16 (r2, vreinterpretq_u16 (value4));
> +  r4 = vaddq_u16 (r3, vreinterpretq_u16 (value5));
> +  r5 = vaddq_u16 (r4, vreinterpretq_u16 (value6));
> +  r6 = vaddq_u16 (r5, vreinterpretq_u16 (value7));
> +  r7 = vaddq_u16 (r6, vreinterpretq_u16 (value8));
> +  return vaddq_u16 (r7, vreinterpretq_u16 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.i16" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..985d776831e7235002a62e
> 88ba9bdf128e31bfd9
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int64x2_t value2;
> +int8x16_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +int32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +uint32x4_t
> +foo ()
> +{
> +  uint32x4_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_u32 (vreinterpretq_u32_s16 (value1), vreinterpretq_u32_s64
> (value2));
> +  r2 = vaddq_u32 (r1, vreinterpretq_u32_s8 (value3));
> +  r3 = vaddq_u32 (r2, vreinterpretq_u32_u8 (value4));
> +  r4 = vaddq_u32 (r3, vreinterpretq_u32_u16 (value5));
> +  r5 = vaddq_u32 (r4, vreinterpretq_u32_u64 (value6));
> +  r6 = vaddq_u32 (r5, vreinterpretq_u32_s32 (value7));
> +  r7 = vaddq_u32 (r6, vreinterpretq_u32_f16 (value8));
> +  return vaddq_u32 (r7, vreinterpretq_u32_f32 (value9));
> +}
> +
> +uint32x4_t
> +foo1 ()
> +{
> +  uint32x4_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_u32 (vreinterpretq_u32 (value1), vreinterpretq_u32 (value2));
> +  r2 = vaddq_u32 (r1, vreinterpretq_u32 (value3));
> +  r3 = vaddq_u32 (r2, vreinterpretq_u32 (value4));
> +  r4 = vaddq_u32 (r3, vreinterpretq_u32 (value5));
> +  r5 = vaddq_u32 (r4, vreinterpretq_u32 (value6));
> +  r6 = vaddq_u32 (r5, vreinterpretq_u32 (value7));
> +  r7 = vaddq_u32 (r6, vreinterpretq_u32 (value8));
> +  return vaddq_u32 (r7, vreinterpretq_u32 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.i32" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..e77d253a993d13bd17b107
> fd68f0149c7714742a
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c
> @@ -0,0 +1,46 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int8x16_t value2;
> +int32x4_t value3;
> +uint8x16_t value4;
> +uint16x8_t value5;
> +int64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +uint64x2_t
> +foo (mve_pred16_t __p)
> +{
> +  uint64x2_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vpselq_u64 (vreinterpretq_u64_s16 (value1), vreinterpretq_u64_s8
> (value2),
> +		   __p);
> +  r2 = vpselq_u64 (r1, vreinterpretq_u64_s32 (value3), __p);
> +  r3 = vpselq_u64 (r2, vreinterpretq_u64_u8 (value4), __p);
> +  r4 = vpselq_u64 (r3, vreinterpretq_u64_u16 (value5), __p);
> +  r5 = vpselq_u64 (r4, vreinterpretq_u64_s64 (value6), __p);
> +  r6 = vpselq_u64 (r5, vreinterpretq_u64_u32 (value7), __p);
> +  r7 = vpselq_u64 (r6, vreinterpretq_u64_f16 (value8), __p);
> +  return vpselq_u64 (r7, vreinterpretq_u64_f32 (value9), __p);
> +}
> +
> +uint64x2_t
> +foo1 (mve_pred16_t __p)
> +{
> +  uint64x2_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vpselq_u64 (vreinterpretq_u64 (value1), vreinterpretq_u64 (value2),
> __p);
> +  r2 = vpselq_u64 (r1, vreinterpretq_u64 (value3), __p);
> +  r3 = vpselq_u64 (r2, vreinterpretq_u64 (value4), __p);
> +  r4 = vpselq_u64 (r3, vreinterpretq_u64 (value5), __p);
> +  r5 = vpselq_u64 (r4, vreinterpretq_u64 (value6), __p);
> +  r6 = vpselq_u64 (r5, vreinterpretq_u64 (value7), __p);
> +  r7 = vpselq_u64 (r6, vreinterpretq_u64 (value8), __p);
> +  return vpselq_u64 (r7, vreinterpretq_u64 (value9), __p);
> +}
> +
> +/* { dg-final { scan-assembler-times "vpsel" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..9075dea900899dd599df565
> aa4f5a7c0c9be2a2d
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c
> @@ -0,0 +1,45 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O2" } */
> +
> +#include "arm_mve.h"
> +int16x8_t value1;
> +int64x2_t value2;
> +int32x4_t value3;
> +int8x16_t value4;
> +uint16x8_t value5;
> +uint64x2_t value6;
> +uint32x4_t value7;
> +float16x8_t value8;
> +float32x4_t value9;
> +
> +uint8x16_t
> +foo ()
> +{
> +  uint8x16_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_u8 (vreinterpretq_u8_s16 (value1), vreinterpretq_u8_s64
> (value2));
> +  r2 = vaddq_u8 (r1, vreinterpretq_u8_s32 (value3));
> +  r3 = vaddq_u8 (r2, vreinterpretq_u8_s8 (value4));
> +  r4 = vaddq_u8 (r3, vreinterpretq_u8_u16 (value5));
> +  r5 = vaddq_u8 (r4, vreinterpretq_u8_u64 (value6));
> +  r6 = vaddq_u8 (r5, vreinterpretq_u8_u32 (value7));
> +  r7 = vaddq_u8 (r6, vreinterpretq_u8_f16 (value8));
> +  return vaddq_u8 (r7, vreinterpretq_u8_f32 (value9));
> +}
> +
> +uint8x16_t
> +foo1 ()
> +{
> +  uint8x16_t r1,r2,r3,r4,r5,r6,r7;
> +  r1 = vaddq_u8 (vreinterpretq_u8 (value1), vreinterpretq_u8 (value2));
> +  r2 = vaddq_u8 (r1, vreinterpretq_u8 (value3));
> +  r3 = vaddq_u8 (r2, vreinterpretq_u8 (value4));
> +  r4 = vaddq_u8 (r3, vreinterpretq_u8 (value5));
> +  r5 = vaddq_u8 (r4, vreinterpretq_u8 (value6));
> +  r6 = vaddq_u8 (r5, vreinterpretq_u8 (value7));
> +  r7 = vaddq_u8 (r6, vreinterpretq_u8 (value8));
> +  return vaddq_u8 (r7, vreinterpretq_u8 (value9));
> +}
> +
> +/* { dg-final { scan-assembler-times "vadd.i8" 8 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..761d569c2c0cd2363f80b2a
> bc257b53c37b68697
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float.c
> @@ -0,0 +1,17 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O0" } */
> +
> +#include "arm_mve.h"
> +
> +void
> +foo ()
> +{
> +  float16x8_t fa;
> +  float32x4_t fb;
> +  fa = vuninitializedq_f16 ();
> +  fb = vuninitializedq_f32 ();
> +}
> +
> +/* { dg-final { scan-assembler-times "vstrb.8" 4 } } */
> diff --git
> a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..173b978488a540e5502cc05
> efb97a5ea008ccf3b
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c
> @@ -0,0 +1,17 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
> +/* { dg-add-options arm_v8_1m_mve_fp } */
> +/* { dg-additional-options "-O0" } */
> +
> +#include "arm_mve.h"
> +
> +void
> +foo ()
> +{
> +  float16x8_t fa, faa;
> +  float32x4_t fb, fbb;
> +  fa = vuninitializedq (faa);
> +  fb = vuninitializedq (fbb);
> +}
> +
> +/* { dg-final { scan-assembler-times "vstrb.8" 4 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..2969f331d80a0fa5f56a6f76
> 077900db2af9a8e2
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int.c
> @@ -0,0 +1,29 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_ok } */
> +/* { dg-add-options arm_v8_1m_mve } */
> +/* { dg-additional-options "-O0" } */
> +
> +#include "arm_mve.h"
> +
> +void
> +foo ()
> +{
> +  int8x16_t a;
> +  int16x8_t b;
> +  int32x4_t c;
> +  int64x2_t d;
> +  uint8x16_t ua;
> +  uint16x8_t ub;
> +  uint32x4_t uc;
> +  uint64x2_t ud;
> +  a = vuninitializedq_s8 ();
> +  b = vuninitializedq_s16 ();
> +  c = vuninitializedq_s32 ();
> +  d = vuninitializedq_s64 ();
> +  ua = vuninitializedq_u8 ();
> +  ub = vuninitializedq_u16 ();
> +  uc = vuninitializedq_u32 ();
> +  ud = vuninitializedq_u64 ();
> +}
> +
> +/* { dg-final { scan-assembler-times "vstrb.8" 16 } } */
> diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c
> b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c
> new file mode 100644
> index
> 0000000000000000000000000000000000000000..555019011a38842911177aa
> 645516cee80c4abb0
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c
> @@ -0,0 +1,29 @@
> +/* { dg-do compile  } */
> +/* { dg-require-effective-target arm_v8_1m_mve_ok } */
> +/* { dg-add-options arm_v8_1m_mve } */
> +/* { dg-additional-options "-O0" } */
> +
> +#include "arm_mve.h"
> +
> +void
> +foo ()
> +{
> +  int8x16_t a, aa;
> +  int16x8_t b, bb;
> +  int32x4_t c, cc;
> +  int64x2_t d, dd;
> +  uint8x16_t ua, uaa;
> +  uint16x8_t ub, ubb;
> +  uint32x4_t uc, ucc;
> +  uint64x2_t ud, udd;
> +  a = vuninitializedq (aa);
> +  b = vuninitializedq (bb);
> +  c = vuninitializedq (cc);
> +  d = vuninitializedq (dd);
> +  ua = vuninitializedq (uaa);
> +  ub = vuninitializedq (ubb);
> +  uc = vuninitializedq (ucc);
> +  ud = vuninitializedq (udd);
> +}
> +
> +/* { dg-final { scan-assembler-times "vstrb.8" 24 } } */
diff mbox series

Patch

diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 55c256910bb7f4c616ea592be699f7f4fc3f17f7..916565c9b55bae77869669fd1e8f8b7f4a37b52e 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -1906,6 +1906,106 @@  typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
 #define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
 #define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
 #define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
+#define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
+#define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
+#define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
+#define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
+#define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
+#define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
+#define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
+#define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
+#define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
+#define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
+#define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
+#define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
+#define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
+#define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
+#define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
+#define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
+#define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
+#define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
+#define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
+#define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
+#define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
+#define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
+#define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
+#define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
+#define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
+#define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
+#define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
+#define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
+#define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
+#define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
+#define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
+#define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
+#define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
+#define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
+#define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
+#define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
+#define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
+#define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
+#define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
+#define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
+#define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
+#define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
+#define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
+#define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
+#define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
+#define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
+#define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
+#define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
+#define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
+#define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
+#define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
+#define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
+#define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
+#define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
+#define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
+#define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
+#define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
+#define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
+#define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
+#define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
+#define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
+#define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
+#define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
+#define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
+#define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
+#define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
+#define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
+#define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
+#define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
+#define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
+#define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
+#define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
+#define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
+#define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
+#define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
+#define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
+#define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
+#define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
+#define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
+#define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
+#define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
+#define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
+#define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
+#define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
+#define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
+#define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
+#define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
+#define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
+#define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
+#define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
+#define vuninitializedq_u8() __arm_vuninitializedq_u8()
+#define vuninitializedq_u16() __arm_vuninitializedq_u16()
+#define vuninitializedq_u32() __arm_vuninitializedq_u32()
+#define vuninitializedq_u64() __arm_vuninitializedq_u64()
+#define vuninitializedq_s8() __arm_vuninitializedq_s8()
+#define vuninitializedq_s16() __arm_vuninitializedq_s16()
+#define vuninitializedq_s32() __arm_vuninitializedq_s32()
+#define vuninitializedq_s64() __arm_vuninitializedq_s64()
+#define vuninitializedq_f16() __arm_vuninitializedq_f16()
+#define vuninitializedq_f32() __arm_vuninitializedq_f32()
 #endif
 
 __extension__ extern __inline void
@@ -12391,6 +12491,471 @@  __arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
   return __a + __b;
 }
 
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u8 (void)
+{
+  uint8x16_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u16 (void)
+{
+  uint16x8_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u32 (void)
+{
+  uint32x4_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u64 (void)
+{
+  uint64x2_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s8 (void)
+{
+  int8x16_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s16 (void)
+{
+  int16x8_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s32 (void)
+{
+  int32x4_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s64 (void)
+{
+  int64x2_t __uninit;
+  __asm__ ("": "=w"(__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_s32 (int32x4_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_s64 (int64x2_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_s8 (int8x16_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_s16 (int16x8_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_s64 (int64x2_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_s8 (int8x16_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_s16 (int16x8_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_s32 (int32x4_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_s8 (int8x16_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_s16 (int16x8_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_s32 (int32x4_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_s64 (int64x2_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s16 (int16x8_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s32 (int32x4_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s64 (int64x2_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s8 (int8x16_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s16 (int16x8_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s32 (int32x4_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s64 (int64x2_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s8 (int8x16_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s16 (int16x8_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s32 (int32x4_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s64 (int64x2_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s8 (int8x16_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s16 (int16x8_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s32 (int32x4_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s64 (int64x2_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s8 (int8x16_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
 
 __extension__ extern __inline void
@@ -14771,6 +15336,262 @@  __arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
   return __a + __b;
 }
 
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_f16 (void)
+{
+  float16x8_t __uninit;
+  __asm__ ("": "=w" (__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_f32 (void)
+{
+  float32x4_t __uninit;
+  __asm__ ("": "=w" (__uninit));
+  return __uninit;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_f16 (float16x8_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_f32 (float32x4_t __a)
+{
+  return (int32x4_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_f16 (float16x8_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_f32 (float32x4_t __a)
+{
+  return (int16x8_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_f16 (float16x8_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_f32 (float32x4_t __a)
+{
+  return (int64x2_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_f16 (float16x8_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_f32 (float32x4_t __a)
+{
+  return (int8x16_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_f16 (float16x8_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_f32 (float32x4_t __a)
+{
+  return (uint16x8_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_f16 (float16x8_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_f32 (float32x4_t __a)
+{
+  return (uint32x4_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_f16 (float16x8_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_f32 (float32x4_t __a)
+{
+  return (uint64x2_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_f16 (float16x8_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_f32 (float32x4_t __a)
+{
+  return (uint8x16_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_f32 (float32x4_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s16 (int16x8_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s32 (int32x4_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s64 (int64x2_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s8 (int8x16_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u16 (uint16x8_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u32 (uint32x4_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u64 (uint64x2_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u8 (uint8x16_t __a)
+{
+  return (float16x8_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_f16 (float16x8_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s16 (int16x8_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s32 (int32x4_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s64 (int64x2_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s8 (int8x16_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+  return (float32x4_t)  __a;
+}
+
 #endif
 
 enum {
@@ -17543,6 +18364,150 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
 
+#define vuninitializedq(p0) __arm_vuninitializedq(p0)
+#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
+
+#define vreinterpretq_f16(p0) __arm_vreinterpretq_f16(p0)
+#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_f32(p0) __arm_vreinterpretq_f32(p0)
+#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
+
+#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
+#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
+#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
+#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
+#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
+#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
+#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
+#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
+#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
 #else /* MVE Integer.  */
 
 #define vst4q(p0,p1) __arm_vst4q(p0,p1)
@@ -19925,6 +20890,106 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
 
+#define vuninitializedq(p0) __arm_vuninitializedq(p0)
+#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})
+
+#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0)
+#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0)
+#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0)
+#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0)
+#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0)
+#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0)
+#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0)
+#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));})
+
+#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0)
+#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
 #endif /* MVE Integer.  */
 
 #define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1)
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c
new file mode 100644
index 0000000000000000000000000000000000000000..bc40440296522a96a7c6fb0a7732c735ea37b266
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f16.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int8x16_t value1;
+int64x2_t value2;
+int32x4_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+int16x8_t value8;
+float32x4_t value9;
+
+float16x8_t
+foo ()
+{
+  float16x8_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_f16 (vreinterpretq_f16_s8 (value1), vreinterpretq_f16_s64 (value2));
+  r2 = vaddq_f16 (r1, vreinterpretq_f16_s32 (value3));
+  r3 = vaddq_f16 (r2, vreinterpretq_f16_u8 (value4));
+  r4 = vaddq_f16 (r3, vreinterpretq_f16_u16 (value5));
+  r5 = vaddq_f16 (r4, vreinterpretq_f16_u64 (value6));
+  r6 = vaddq_f16 (r5, vreinterpretq_f16_u32 (value7));
+  r7 = vaddq_f16 (r6, vreinterpretq_f16_s16 (value8));
+  return vaddq_f16 (r7, vreinterpretq_f16_f32 (value9));
+}
+
+float16x8_t
+foo1 ()
+{
+  float16x8_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_f16 (vreinterpretq_f16 (value1), vreinterpretq_f16 (value2));
+  r2 = vaddq_f16 (r1, vreinterpretq_f16 (value3));
+  r3 = vaddq_f16 (r2, vreinterpretq_f16 (value4));
+  r4 = vaddq_f16 (r3, vreinterpretq_f16 (value5));
+  r5 = vaddq_f16 (r4, vreinterpretq_f16 (value6));
+  r6 = vaddq_f16 (r5, vreinterpretq_f16 (value7));
+  r7 = vaddq_f16 (r6, vreinterpretq_f16 (value8));
+  return vaddq_f16 (r7, vreinterpretq_f16 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.f16" 8 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c
new file mode 100644
index 0000000000000000000000000000000000000000..d30818b0f3d896f7e9f30227b8578f90d2731209
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_f32.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int64x2_t value2;
+int8x16_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+int32x4_t value9;
+
+float32x4_t
+foo ()
+{
+  float32x4_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_f32 (vreinterpretq_f32_s16 (value1), vreinterpretq_f32_s64 (value2));
+  r2 = vaddq_f32 (r1, vreinterpretq_f32_s8 (value3));
+  r3 = vaddq_f32 (r2, vreinterpretq_f32_u8 (value4));
+  r4 = vaddq_f32 (r3, vreinterpretq_f32_u16 (value5));
+  r5 = vaddq_f32 (r4, vreinterpretq_f32_u64 (value6));
+  r6 = vaddq_f32 (r5, vreinterpretq_f32_u32 (value7));
+  r7 = vaddq_f32 (r6, vreinterpretq_f32_f16 (value8));
+  return vaddq_f32 (r7, vreinterpretq_f32_s32 (value9));
+}
+
+float32x4_t
+foo1 ()
+{
+  float32x4_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_f32 (vreinterpretq_f32 (value1), vreinterpretq_f32 (value2));
+  r2 = vaddq_f32 (r1, vreinterpretq_f32 (value3));
+  r3 = vaddq_f32 (r2, vreinterpretq_f32 (value4));
+  r4 = vaddq_f32 (r3, vreinterpretq_f32 (value5));
+  r5 = vaddq_f32 (r4, vreinterpretq_f32 (value6));
+  r6 = vaddq_f32 (r5, vreinterpretq_f32 (value7));
+  r7 = vaddq_f32 (r6, vreinterpretq_f32 (value8));
+  return vaddq_f32 (r7, vreinterpretq_f32 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.f32" 8 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c
new file mode 100644
index 0000000000000000000000000000000000000000..627a9d8de7c7ac850c3b9f1049057264d908b34d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s16.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int8x16_t value1;
+int64x2_t value2;
+int32x4_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+int16x8_t
+foo ()
+{
+  int16x8_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_s16 (vreinterpretq_s16_s8 (value1), vreinterpretq_s16_s64 (value2));
+  r2 = vaddq_s16 (r1, vreinterpretq_s16_s32 (value3));
+  r3 = vaddq_s16 (r2, vreinterpretq_s16_u8 (value4));
+  r4 = vaddq_s16 (r3, vreinterpretq_s16_u16 (value5));
+  r5 = vaddq_s16 (r4, vreinterpretq_s16_u64 (value6));
+  r6 = vaddq_s16 (r5, vreinterpretq_s16_u32 (value7));
+  r7 = vaddq_s16 (r6, vreinterpretq_s16_f16 (value8));
+  return vaddq_s16 (r7, vreinterpretq_s16_f32 (value9));
+}
+
+int16x8_t
+foo1 ()
+{
+  int16x8_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_s16 (vreinterpretq_s16 (value1), vreinterpretq_s16 (value2));
+  r2 = vaddq_s16 (r1, vreinterpretq_s16 (value3));
+  r3 = vaddq_s16 (r2, vreinterpretq_s16 (value4));
+  r4 = vaddq_s16 (r3, vreinterpretq_s16 (value5));
+  r5 = vaddq_s16 (r4, vreinterpretq_s16 (value6));
+  r6 = vaddq_s16 (r5, vreinterpretq_s16 (value7));
+  r7 = vaddq_s16 (r6, vreinterpretq_s16 (value8));
+  return vaddq_s16 (r7, vreinterpretq_s16 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.i16" 8 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c
new file mode 100644
index 0000000000000000000000000000000000000000..1b905e1095348e1e4376e0ef695f4b607a29f8b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s32.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int64x2_t value2;
+int8x16_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+int32x4_t
+foo ()
+{
+  int32x4_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_s32 (vreinterpretq_s32_s16 (value1), vreinterpretq_s32_s64 (value2));
+  r2 = vaddq_s32 (r1, vreinterpretq_s32_s8 (value3));
+  r3 = vaddq_s32 (r2, vreinterpretq_s32_u8 (value4));
+  r4 = vaddq_s32 (r3, vreinterpretq_s32_u16 (value5));
+  r5 = vaddq_s32 (r4, vreinterpretq_s32_u64 (value6));
+  r6 = vaddq_s32 (r5, vreinterpretq_s32_u32 (value7));
+  r7 = vaddq_s32 (r6, vreinterpretq_s32_f16 (value8));
+  return vaddq_s32 (r7, vreinterpretq_s32_f32 (value9));
+}
+
+int32x4_t
+foo1 ()
+{
+  int32x4_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_s32 (vreinterpretq_s32 (value1), vreinterpretq_s32 (value2));
+  r2 = vaddq_s32 (r1, vreinterpretq_s32 (value3));
+  r3 = vaddq_s32 (r2, vreinterpretq_s32 (value4));
+  r4 = vaddq_s32 (r3, vreinterpretq_s32 (value5));
+  r5 = vaddq_s32 (r4, vreinterpretq_s32 (value6));
+  r6 = vaddq_s32 (r5, vreinterpretq_s32 (value7));
+  r7 = vaddq_s32 (r6, vreinterpretq_s32 (value8));
+  return vaddq_s32 (r7, vreinterpretq_s32 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.i32" 8 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c
new file mode 100644
index 0000000000000000000000000000000000000000..3a9fa0b414c202c77890e0cf061102b19fb7e623
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s64.c
@@ -0,0 +1,46 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int8x16_t value2;
+int32x4_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+int64x2_t
+foo (mve_pred16_t __p)
+{
+  int64x2_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vpselq_s64 (vreinterpretq_s64_s16 (value1), vreinterpretq_s64_s8 (value2),
+		   __p);
+  r2 = vpselq_s64 (r1, vreinterpretq_s64_s32 (value3), __p);
+  r3 = vpselq_s64 (r2, vreinterpretq_s64_u8 (value4), __p);
+  r4 = vpselq_s64 (r3, vreinterpretq_s64_u16 (value5), __p);
+  r5 = vpselq_s64 (r4, vreinterpretq_s64_u64 (value6), __p);
+  r6 = vpselq_s64 (r5, vreinterpretq_s64_u32 (value7), __p);
+  r7 = vpselq_s64 (r6, vreinterpretq_s64_f16 (value8), __p);
+  return vpselq_s64 (r7, vreinterpretq_s64_f32 (value9), __p);
+}
+
+int64x2_t
+foo1 (mve_pred16_t __p)
+{
+  int64x2_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vpselq_s64 (vreinterpretq_s64 (value1), vreinterpretq_s64 (value2), __p);
+  r2 = vpselq_s64 (r1, vreinterpretq_s64 (value3), __p);
+  r3 = vpselq_s64 (r2, vreinterpretq_s64 (value4), __p);
+  r4 = vpselq_s64 (r3, vreinterpretq_s64 (value5), __p);
+  r5 = vpselq_s64 (r4, vreinterpretq_s64 (value6), __p);
+  r6 = vpselq_s64 (r5, vreinterpretq_s64 (value7), __p);
+  r7 = vpselq_s64 (r6, vreinterpretq_s64 (value8), __p);
+  return vpselq_s64 (r7, vreinterpretq_s64 (value9), __p);
+}
+
+/* { dg-final { scan-assembler-times "vpsel" 8 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c
new file mode 100644
index 0000000000000000000000000000000000000000..522a935c72f81bad63bdf2f56db135fc4261c766
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_s8.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int64x2_t value2;
+int32x4_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+int8x16_t
+foo ()
+{
+  int8x16_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_s8 (vreinterpretq_s8_s16 (value1), vreinterpretq_s8_s64 (value2));
+  r2 = vaddq_s8 (r1, vreinterpretq_s8_s32 (value3));
+  r3 = vaddq_s8 (r2, vreinterpretq_s8_u8 (value4));
+  r4 = vaddq_s8 (r3, vreinterpretq_s8_u16 (value5));
+  r5 = vaddq_s8 (r4, vreinterpretq_s8_u64 (value6));
+  r6 = vaddq_s8 (r5, vreinterpretq_s8_u32 (value7));
+  r7 = vaddq_s8 (r6, vreinterpretq_s8_f16 (value8));
+  return vaddq_s8 (r7, vreinterpretq_s8_f32 (value9));
+}
+
+int8x16_t
+foo1 ()
+{
+  int8x16_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_s8 (vreinterpretq_s8 (value1), vreinterpretq_s8 (value2));
+  r2 = vaddq_s8 (r1, vreinterpretq_s8 (value3));
+  r3 = vaddq_s8 (r2, vreinterpretq_s8 (value4));
+  r4 = vaddq_s8 (r3, vreinterpretq_s8 (value5));
+  r5 = vaddq_s8 (r4, vreinterpretq_s8 (value6));
+  r6 = vaddq_s8 (r5, vreinterpretq_s8 (value7));
+  r7 = vaddq_s8 (r6, vreinterpretq_s8 (value8));
+  return vaddq_s8 (r7, vreinterpretq_s8 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.i8" 8 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c
new file mode 100644
index 0000000000000000000000000000000000000000..402c0ef61df85be4115f14fdf195548ecd15b25f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u16.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int8x16_t value1;
+int64x2_t value2;
+int32x4_t value3;
+uint8x16_t value4;
+int16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+uint16x8_t
+foo ()
+{
+  uint16x8_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_u16 (vreinterpretq_u16_s8 (value1), vreinterpretq_u16_s64 (value2));
+  r2 = vaddq_u16 (r1, vreinterpretq_u16_s32 (value3));
+  r3 = vaddq_u16 (r2, vreinterpretq_u16_u8 (value4));
+  r4 = vaddq_u16 (r3, vreinterpretq_u16_s16 (value5));
+  r5 = vaddq_u16 (r4, vreinterpretq_u16_u64 (value6));
+  r6 = vaddq_u16 (r5, vreinterpretq_u16_u32 (value7));
+  r7 = vaddq_u16 (r6, vreinterpretq_u16_f16 (value8));
+  return vaddq_u16 (r7, vreinterpretq_u16_f32 (value9));
+}
+
+uint16x8_t
+foo1 ()
+{
+  uint16x8_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_u16 (vreinterpretq_u16 (value1), vreinterpretq_u16 (value2));
+  r2 = vaddq_u16 (r1, vreinterpretq_u16 (value3));
+  r3 = vaddq_u16 (r2, vreinterpretq_u16 (value4));
+  r4 = vaddq_u16 (r3, vreinterpretq_u16 (value5));
+  r5 = vaddq_u16 (r4, vreinterpretq_u16 (value6));
+  r6 = vaddq_u16 (r5, vreinterpretq_u16 (value7));
+  r7 = vaddq_u16 (r6, vreinterpretq_u16 (value8));
+  return vaddq_u16 (r7, vreinterpretq_u16 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.i16" 16 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c
new file mode 100644
index 0000000000000000000000000000000000000000..985d776831e7235002a62e88ba9bdf128e31bfd9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u32.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int64x2_t value2;
+int8x16_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+int32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+uint32x4_t
+foo ()
+{
+  uint32x4_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_u32 (vreinterpretq_u32_s16 (value1), vreinterpretq_u32_s64 (value2));
+  r2 = vaddq_u32 (r1, vreinterpretq_u32_s8 (value3));
+  r3 = vaddq_u32 (r2, vreinterpretq_u32_u8 (value4));
+  r4 = vaddq_u32 (r3, vreinterpretq_u32_u16 (value5));
+  r5 = vaddq_u32 (r4, vreinterpretq_u32_u64 (value6));
+  r6 = vaddq_u32 (r5, vreinterpretq_u32_s32 (value7));
+  r7 = vaddq_u32 (r6, vreinterpretq_u32_f16 (value8));
+  return vaddq_u32 (r7, vreinterpretq_u32_f32 (value9));
+}
+
+uint32x4_t
+foo1 ()
+{
+  uint32x4_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_u32 (vreinterpretq_u32 (value1), vreinterpretq_u32 (value2));
+  r2 = vaddq_u32 (r1, vreinterpretq_u32 (value3));
+  r3 = vaddq_u32 (r2, vreinterpretq_u32 (value4));
+  r4 = vaddq_u32 (r3, vreinterpretq_u32 (value5));
+  r5 = vaddq_u32 (r4, vreinterpretq_u32 (value6));
+  r6 = vaddq_u32 (r5, vreinterpretq_u32 (value7));
+  r7 = vaddq_u32 (r6, vreinterpretq_u32 (value8));
+  return vaddq_u32 (r7, vreinterpretq_u32 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.i32" 16 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c
new file mode 100644
index 0000000000000000000000000000000000000000..e77d253a993d13bd17b107fd68f0149c7714742a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u64.c
@@ -0,0 +1,46 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int8x16_t value2;
+int32x4_t value3;
+uint8x16_t value4;
+uint16x8_t value5;
+int64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+uint64x2_t
+foo (mve_pred16_t __p)
+{
+  uint64x2_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vpselq_u64 (vreinterpretq_u64_s16 (value1), vreinterpretq_u64_s8 (value2),
+		   __p);
+  r2 = vpselq_u64 (r1, vreinterpretq_u64_s32 (value3), __p);
+  r3 = vpselq_u64 (r2, vreinterpretq_u64_u8 (value4), __p);
+  r4 = vpselq_u64 (r3, vreinterpretq_u64_u16 (value5), __p);
+  r5 = vpselq_u64 (r4, vreinterpretq_u64_s64 (value6), __p);
+  r6 = vpselq_u64 (r5, vreinterpretq_u64_u32 (value7), __p);
+  r7 = vpselq_u64 (r6, vreinterpretq_u64_f16 (value8), __p);
+  return vpselq_u64 (r7, vreinterpretq_u64_f32 (value9), __p);
+}
+
+uint64x2_t
+foo1 (mve_pred16_t __p)
+{
+  uint64x2_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vpselq_u64 (vreinterpretq_u64 (value1), vreinterpretq_u64 (value2), __p);
+  r2 = vpselq_u64 (r1, vreinterpretq_u64 (value3), __p);
+  r3 = vpselq_u64 (r2, vreinterpretq_u64 (value4), __p);
+  r4 = vpselq_u64 (r3, vreinterpretq_u64 (value5), __p);
+  r5 = vpselq_u64 (r4, vreinterpretq_u64 (value6), __p);
+  r6 = vpselq_u64 (r5, vreinterpretq_u64 (value7), __p);
+  r7 = vpselq_u64 (r6, vreinterpretq_u64 (value8), __p);
+  return vpselq_u64 (r7, vreinterpretq_u64 (value9), __p);
+}
+
+/* { dg-final { scan-assembler-times "vpsel" 16 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c
new file mode 100644
index 0000000000000000000000000000000000000000..9075dea900899dd599df565aa4f5a7c0c9be2a2d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vreinterpretq_u8.c
@@ -0,0 +1,45 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+int16x8_t value1;
+int64x2_t value2;
+int32x4_t value3;
+int8x16_t value4;
+uint16x8_t value5;
+uint64x2_t value6;
+uint32x4_t value7;
+float16x8_t value8;
+float32x4_t value9;
+
+uint8x16_t
+foo ()
+{
+  uint8x16_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_u8 (vreinterpretq_u8_s16 (value1), vreinterpretq_u8_s64 (value2));
+  r2 = vaddq_u8 (r1, vreinterpretq_u8_s32 (value3));
+  r3 = vaddq_u8 (r2, vreinterpretq_u8_s8 (value4));
+  r4 = vaddq_u8 (r3, vreinterpretq_u8_u16 (value5));
+  r5 = vaddq_u8 (r4, vreinterpretq_u8_u64 (value6));
+  r6 = vaddq_u8 (r5, vreinterpretq_u8_u32 (value7));
+  r7 = vaddq_u8 (r6, vreinterpretq_u8_f16 (value8));
+  return vaddq_u8 (r7, vreinterpretq_u8_f32 (value9));
+}
+
+uint8x16_t
+foo1 ()
+{
+  uint8x16_t r1,r2,r3,r4,r5,r6,r7;
+  r1 = vaddq_u8 (vreinterpretq_u8 (value1), vreinterpretq_u8 (value2));
+  r2 = vaddq_u8 (r1, vreinterpretq_u8 (value3));
+  r3 = vaddq_u8 (r2, vreinterpretq_u8 (value4));
+  r4 = vaddq_u8 (r3, vreinterpretq_u8 (value5));
+  r5 = vaddq_u8 (r4, vreinterpretq_u8 (value6));
+  r6 = vaddq_u8 (r5, vreinterpretq_u8 (value7));
+  r7 = vaddq_u8 (r6, vreinterpretq_u8 (value8));
+  return vaddq_u8 (r7, vreinterpretq_u8 (value9));
+}
+
+/* { dg-final { scan-assembler-times "vadd.i8" 16 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float.c
new file mode 100644
index 0000000000000000000000000000000000000000..761d569c2c0cd2363f80b2abc257b53c37b68697
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float.c
@@ -0,0 +1,17 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O0" } */
+
+#include "arm_mve.h"
+
+void
+foo ()
+{
+  float16x8_t fa;
+  float32x4_t fb;
+  fa = vuninitializedq_f16 ();
+  fb = vuninitializedq_f32 ();
+}
+
+/* { dg-final { scan-assembler-times "vstrb.8" 4 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c
new file mode 100644
index 0000000000000000000000000000000000000000..173b978488a540e5502cc05efb97a5ea008ccf3b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_float1.c
@@ -0,0 +1,17 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O0" } */
+
+#include "arm_mve.h"
+
+void
+foo ()
+{
+  float16x8_t fa, faa;
+  float32x4_t fb, fbb;
+  fa = vuninitializedq (faa);
+  fb = vuninitializedq (fbb);
+}
+
+/* { dg-final { scan-assembler-times "vstrb.8" 6 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int.c
new file mode 100644
index 0000000000000000000000000000000000000000..2969f331d80a0fa5f56a6f76077900db2af9a8e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int.c
@@ -0,0 +1,29 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O0" } */
+
+#include "arm_mve.h"
+
+void
+foo ()
+{
+  int8x16_t a;
+  int16x8_t b;
+  int32x4_t c;
+  int64x2_t d;
+  uint8x16_t ua;
+  uint16x8_t ub;
+  uint32x4_t uc;
+  uint64x2_t ud;
+  a = vuninitializedq_s8 ();
+  b = vuninitializedq_s16 ();
+  c = vuninitializedq_s32 ();
+  d = vuninitializedq_s64 ();
+  ua = vuninitializedq_u8 ();
+  ub = vuninitializedq_u16 ();
+  uc = vuninitializedq_u32 ();
+  ud = vuninitializedq_u64 ();
+}
+
+/* { dg-final { scan-assembler-times "vstrb.8" 16 } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c
new file mode 100644
index 0000000000000000000000000000000000000000..555019011a38842911177aa645516cee80c4abb0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vuninitializedq_int1.c
@@ -0,0 +1,29 @@ 
+/* { dg-do compile  } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O0" } */
+
+#include "arm_mve.h"
+
+void
+foo ()
+{
+  int8x16_t a, aa;
+  int16x8_t b, bb;
+  int32x4_t c, cc;
+  int64x2_t d, dd;
+  uint8x16_t ua, uaa;
+  uint16x8_t ub, ubb;
+  uint32x4_t uc, ucc;
+  uint64x2_t ud, udd;
+  a = vuninitializedq (aa);
+  b = vuninitializedq (bb);
+  c = vuninitializedq (cc);
+  d = vuninitializedq (dd);
+  ua = vuninitializedq (uaa);
+  ub = vuninitializedq (ubb);
+  uc = vuninitializedq (ucc);
+  ud = vuninitializedq (udd);
+}
+
+/* { dg-final { scan-assembler-times "vstrb.8" 24 } } */