diff mbox series

[11/22] arm: [MVE intrinsics] rework vandq veorq

Message ID 20230418134608.244751-12-christophe.lyon@arm.com
State New
Headers show
Series arm: New framework for MVE intrinsics | expand

Commit Message

Christophe Lyon April 18, 2023, 1:45 p.m. UTC
Implement vandq, veorq using the new MVE builtins framework.

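As a quick illustration (this example is not part of the patch, and the
helper names are invented for the purpose), the user-facing ACLE API is
unchanged by this rework; only the expansion path inside GCC moves to the
new framework.  Built with e.g. -march=armv8.1-m.main+mve.fp -mfloat-abi=hard:

    #include <arm_mve.h>

    /* Plain form: bitwise AND across all lanes.  */
    uint32x4_t
    do_and (uint32x4_t a, uint32x4_t b)
    {
      return vandq (a, b);
    }

    /* _m (merging) form: lanes where p is false keep inactive's value.  */
    int16x8_t
    do_eor_m (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
    {
      return veorq_m (inactive, a, b, p);
    }

    /* _x (don't-care) form: lanes where p is false are undefined.  */
    float32x4_t
    do_and_x (float32x4_t a, float32x4_t b, mve_pred16_t p)
    {
      return vandq_x (a, b, p);
    }
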
2022-09-08  Christophe Lyon <christophe.lyon@arm.com>

	gcc/
	* config/arm/arm-mve-builtins-base.cc (FUNCTION_WITH_RTX_M): New.
	(vandq, veorq): New.
	* config/arm/arm-mve-builtins-base.def (vandq, veorq): New.
	* config/arm/arm-mve-builtins-base.h (vandq, veorq): New.
	* config/arm/arm_mve.h (vandq): Remove.
	(vandq_m): Remove.
	(vandq_x): Remove.
	(vandq_u8): Remove.
	(vandq_s8): Remove.
	(vandq_u16): Remove.
	(vandq_s16): Remove.
	(vandq_u32): Remove.
	(vandq_s32): Remove.
	(vandq_f16): Remove.
	(vandq_f32): Remove.
	(vandq_m_s8): Remove.
	(vandq_m_s32): Remove.
	(vandq_m_s16): Remove.
	(vandq_m_u8): Remove.
	(vandq_m_u32): Remove.
	(vandq_m_u16): Remove.
	(vandq_m_f32): Remove.
	(vandq_m_f16): Remove.
	(vandq_x_s8): Remove.
	(vandq_x_s16): Remove.
	(vandq_x_s32): Remove.
	(vandq_x_u8): Remove.
	(vandq_x_u16): Remove.
	(vandq_x_u32): Remove.
	(vandq_x_f16): Remove.
	(vandq_x_f32): Remove.
	(__arm_vandq_u8): Remove.
	(__arm_vandq_s8): Remove.
	(__arm_vandq_u16): Remove.
	(__arm_vandq_s16): Remove.
	(__arm_vandq_u32): Remove.
	(__arm_vandq_s32): Remove.
	(__arm_vandq_m_s8): Remove.
	(__arm_vandq_m_s32): Remove.
	(__arm_vandq_m_s16): Remove.
	(__arm_vandq_m_u8): Remove.
	(__arm_vandq_m_u32): Remove.
	(__arm_vandq_m_u16): Remove.
	(__arm_vandq_x_s8): Remove.
	(__arm_vandq_x_s16): Remove.
	(__arm_vandq_x_s32): Remove.
	(__arm_vandq_x_u8): Remove.
	(__arm_vandq_x_u16): Remove.
	(__arm_vandq_x_u32): Remove.
	(__arm_vandq_f16): Remove.
	(__arm_vandq_f32): Remove.
	(__arm_vandq_m_f32): Remove.
	(__arm_vandq_m_f16): Remove.
	(__arm_vandq_x_f16): Remove.
	(__arm_vandq_x_f32): Remove.
	(__arm_vandq): Remove.
	(__arm_vandq_m): Remove.
	(__arm_vandq_x): Remove.
	(veorq_m): Remove.
	(veorq_x): Remove.
	(veorq_u8): Remove.
	(veorq_s8): Remove.
	(veorq_u16): Remove.
	(veorq_s16): Remove.
	(veorq_u32): Remove.
	(veorq_s32): Remove.
	(veorq_f16): Remove.
	(veorq_f32): Remove.
	(veorq_m_s8): Remove.
	(veorq_m_s32): Remove.
	(veorq_m_s16): Remove.
	(veorq_m_u8): Remove.
	(veorq_m_u32): Remove.
	(veorq_m_u16): Remove.
	(veorq_m_f32): Remove.
	(veorq_m_f16): Remove.
	(veorq_x_s8): Remove.
	(veorq_x_s16): Remove.
	(veorq_x_s32): Remove.
	(veorq_x_u8): Remove.
	(veorq_x_u16): Remove.
	(veorq_x_u32): Remove.
	(veorq_x_f16): Remove.
	(veorq_x_f32): Remove.
	(__arm_veorq_u8): Remove.
	(__arm_veorq_s8): Remove.
	(__arm_veorq_u16): Remove.
	(__arm_veorq_s16): Remove.
	(__arm_veorq_u32): Remove.
	(__arm_veorq_s32): Remove.
	(__arm_veorq_m_s8): Remove.
	(__arm_veorq_m_s32): Remove.
	(__arm_veorq_m_s16): Remove.
	(__arm_veorq_m_u8): Remove.
	(__arm_veorq_m_u32): Remove.
	(__arm_veorq_m_u16): Remove.
	(__arm_veorq_x_s8): Remove.
	(__arm_veorq_x_s16): Remove.
	(__arm_veorq_x_s32): Remove.
	(__arm_veorq_x_u8): Remove.
	(__arm_veorq_x_u16): Remove.
	(__arm_veorq_x_u32): Remove.
	(__arm_veorq_f16): Remove.
	(__arm_veorq_f32): Remove.
	(__arm_veorq_m_f32): Remove.
	(__arm_veorq_m_f16): Remove.
	(__arm_veorq_x_f16): Remove.
	(__arm_veorq_x_f32): Remove.
	(__arm_veorq): Remove.
	(__arm_veorq_m): Remove.
	(__arm_veorq_x): Remove.
---
 gcc/config/arm/arm-mve-builtins-base.cc  |  10 +
 gcc/config/arm/arm-mve-builtins-base.def |   4 +
 gcc/config/arm/arm-mve-builtins-base.h   |   2 +
 gcc/config/arm/arm_mve.h                 | 862 -----------------------
 4 files changed, 16 insertions(+), 862 deletions(-)

Comments

Kyrylo Tkachov May 2, 2023, 4:37 p.m. UTC | #1
> -----Original Message-----
> From: Christophe Lyon <christophe.lyon@arm.com>
> Sent: Tuesday, April 18, 2023 2:46 PM
> To: gcc-patches@gcc.gnu.org; Kyrylo Tkachov <Kyrylo.Tkachov@arm.com>;
> Richard Earnshaw <Richard.Earnshaw@arm.com>; Richard Sandiford
> <Richard.Sandiford@arm.com>
> Cc: Christophe Lyon <Christophe.Lyon@arm.com>
> Subject: [PATCH 11/22] arm: [MVE intrinsics] rework vandq veorq
> 
> Implement vandq, veorq using the new MVE builtins framework.
> 

Ok.
Thanks,
Kyrill

> diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc
> index 48b09bffd0c..51fed8f671f 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.cc
> +++ b/gcc/config/arm/arm-mve-builtins-base.cc
> @@ -90,7 +90,17 @@ namespace arm_mve {
>      UNSPEC##_M_S, UNSPEC##_M_U, UNSPEC##_M_F,			\
>      UNSPEC##_M_N_S, UNSPEC##_M_N_U, UNSPEC##_M_N_F))
> 
> +  /* Helper for builtins with RTX codes, and _m predicated overrides.  */
> +#define FUNCTION_WITH_RTX_M(NAME, RTX, UNSPEC) FUNCTION		\
> +  (NAME, unspec_based_mve_function_exact_insn,			\
> +   (RTX, RTX, RTX,							\
> +    -1, -1, -1,								\
> +    UNSPEC##_M_S, UNSPEC##_M_U, UNSPEC##_M_F,			\
> +    -1, -1, -1))
> +
>  FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ)
> +FUNCTION_WITH_RTX_M (vandq, AND, VANDQ)
> +FUNCTION_WITH_RTX_M (veorq, XOR, VEORQ)
>  FUNCTION_WITH_RTX_M_N (vmulq, MULT, VMULQ)
>  FUNCTION (vreinterpretq, vreinterpretq_impl,)
>  FUNCTION_WITH_RTX_M_N (vsubq, MINUS, VSUBQ)
> diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def
> index 624558c08b2..a933c9fc91e 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.def
> +++ b/gcc/config/arm/arm-mve-builtins-base.def
> @@ -19,6 +19,8 @@
> 
>  #define REQUIRES_FLOAT false
>  DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_integer, mx_or_none)
> +DEF_MVE_FUNCTION (vandq, binary, all_integer, mx_or_none)
> +DEF_MVE_FUNCTION (veorq, binary, all_integer, mx_or_none)
>  DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_integer, mx_or_none)
>  DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_integer, none)
>  DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_integer, mx_or_none)
> @@ -27,6 +29,8 @@ DEF_MVE_FUNCTION (vuninitializedq, inherent, all_integer_with_64, none)
> 
>  #define REQUIRES_FLOAT true
>  DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_float, mx_or_none)
> +DEF_MVE_FUNCTION (vandq, binary, all_float, mx_or_none)
> +DEF_MVE_FUNCTION (veorq, binary, all_float, mx_or_none)
>  DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_float, mx_or_none)
>  DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_float, none)
>  DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_float, mx_or_none)
> diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h
> index 30f8549c495..4fcf55715b6 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.h
> +++ b/gcc/config/arm/arm-mve-builtins-base.h
> @@ -24,6 +24,8 @@ namespace arm_mve {
>  namespace functions {
> 
>  extern const function_base *const vaddq;
> +extern const function_base *const vandq;
> +extern const function_base *const veorq;
>  extern const function_base *const vmulq;
>  extern const function_base *const vreinterpretq;
>  extern const function_base *const vsubq;
> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
> index 42a1af2ae15..0ad0122e44f 100644
> --- a/gcc/config/arm/arm_mve.h
> +++ b/gcc/config/arm/arm_mve.h
> @@ -77,14 +77,12 @@
>  #define vmaxq(__a, __b) __arm_vmaxq(__a, __b)
>  #define vhsubq(__a, __b) __arm_vhsubq(__a, __b)
>  #define vhaddq(__a, __b) __arm_vhaddq(__a, __b)
> -#define veorq(__a, __b) __arm_veorq(__a, __b)
>  #define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b)
>  #define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b)
>  #define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b)
>  #define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b)
>  #define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b)
>  #define vbicq(__a, __b) __arm_vbicq(__a, __b)
> -#define vandq(__a, __b) __arm_vandq(__a, __b)
>  #define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p)
>  #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
>  #define vabdq(__a, __b) __arm_vabdq(__a, __b)
> @@ -236,12 +234,10 @@
>  #define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p)
>  #define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p)
>  #define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p)
> -#define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p)
>  #define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p)
>  #define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p)
>  #define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p)
>  #define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p)
> -#define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p)
>  #define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p)
>  #define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p)
>  #define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p)
> @@ -404,10 +400,8 @@
>  #define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p)
>  #define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p)
>  #define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p)
> -#define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p)
>  #define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p)
>  #define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p)
> -#define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p)
>  #define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p)
>  #define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p)
>  #define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p)
> @@ -702,7 +696,6 @@
>  #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b)
>  #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b)
>  #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b)
> -#define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b)
>  #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b)
>  #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b)
>  #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b)
> @@ -713,7 +706,6 @@
>  #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b)
>  #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b)
>  #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b)
> -#define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
>  #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
>  #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
>  #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
> @@ -781,12 +773,10 @@
>  #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b)
>  #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b)
>  #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b)
> -#define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b)
>  #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b)
>  #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b)
>  #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b)
>  #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
> -#define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
>  #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
>  #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
>  #define vshlq_n_s8(__a,  __imm) __arm_vshlq_n_s8(__a,  __imm)
> @@ -812,7 +802,6 @@
>  #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b)
>  #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b)
>  #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b)
> -#define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b)
>  #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b)
>  #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b)
>  #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b)
> @@ -823,7 +812,6 @@
>  #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b)
>  #define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
>  #define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
> -#define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
>  #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
>  #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
>  #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
> @@ -891,12 +879,10 @@
>  #define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
>  #define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
>  #define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
> -#define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
>  #define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
>  #define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
>  #define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
>  #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
> -#define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
>  #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
>  #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
>  #define vshlq_n_s16(__a,  __imm) __arm_vshlq_n_s16(__a,  __imm)
> @@ -922,7 +908,6 @@
>  #define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
>  #define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
>  #define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
> -#define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
>  #define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
>  #define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
>  #define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
> @@ -933,7 +918,6 @@
>  #define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
>  #define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
>  #define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
> -#define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
>  #define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
>  #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
>  #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
> @@ -1001,12 +985,10 @@
>  #define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
>  #define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
>  #define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
> -#define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
>  #define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
>  #define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
>  #define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
>  #define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
> -#define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
>  #define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
>  #define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
>  #define vshlq_n_s32(__a,  __imm) __arm_vshlq_n_s32(__a,  __imm)
> @@ -1059,7 +1041,6 @@
>  #define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
>  #define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
>  #define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
> -#define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
>  #define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
>  #define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
>  #define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
> @@ -1067,7 +1048,6 @@
>  #define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
>  #define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
>  #define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
> -#define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
>  #define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
>  #define vshlltq_n_s8(__a,  __imm) __arm_vshlltq_n_s8(__a,  __imm)
>  #define vshllbq_n_s8(__a,  __imm) __arm_vshllbq_n_s8(__a,  __imm)
> @@ -1120,7 +1100,6 @@
>  #define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
>  #define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
>  #define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
> -#define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
>  #define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
>  #define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
>  #define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
> @@ -1128,7 +1107,6 @@
>  #define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
>  #define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
>  #define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
> -#define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
>  #define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
>  #define vshlltq_n_s16(__a,  __imm) __arm_vshlltq_n_s16(__a,  __imm)
>  #define vshllbq_n_s16(__a,  __imm) __arm_vshllbq_n_s16(__a,  __imm)
> @@ -1662,12 +1640,6 @@
>  #define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
>  #define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
>  #define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
> -#define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
> -#define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
> -#define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
> -#define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
> -#define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
> -#define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
>  #define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
>  #define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
>  #define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
> @@ -1692,12 +1664,6 @@
>  #define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
>  #define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
>  #define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
> -#define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
> -#define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
> -#define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
> -#define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
> -#define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
> -#define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
>  #define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
>  #define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
>  #define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
> @@ -2006,8 +1972,6 @@
>  #define vshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vshrntq_m_n_u16(__a, __b,  __imm, __p)
>  #define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
>  #define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
> -#define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
> -#define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
>  #define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
>  #define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
>  #define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
> @@ -2036,8 +2000,6 @@
>  #define vcvtq_m_n_s16_f16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a,  __imm6, __p)
>  #define vcvtq_m_n_u32_f32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a,  __imm6, __p)
>  #define vcvtq_m_n_u16_f16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a,  __imm6, __p)
> -#define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
> -#define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
>  #define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
>  #define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
>  #define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
> @@ -2467,12 +2429,6 @@
>  #define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
>  #define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
>  #define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
> -#define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
> -#define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
> -#define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
> -#define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
> -#define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
> -#define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
>  #define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
>  #define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
>  #define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
> @@ -2485,12 +2441,6 @@
>  #define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p)
>  #define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p)
>  #define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p)
> -#define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p)
> -#define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p)
> -#define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p)
> -#define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p)
> -#define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p)
> -#define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p)
>  #define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p)
>  #define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p)
>  #define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p)
> @@ -2641,14 +2591,10 @@
>  #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p)
>  #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p)
>  #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p)
> -#define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p)
> -#define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p)
>  #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p)
>  #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p)
>  #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p)
>  #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p)
> -#define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p)
> -#define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p)
>  #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p)
>  #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p)
>  #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p)
> @@ -3647,13 +3593,6 @@ __arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b)
>    return __builtin_mve_vhaddq_n_uv16qi (__a, __b);
>  }
> 
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b)
> -{
> -  return __builtin_mve_veorq_uv16qi (__a, __b);
> -}
> -
>  __extension__ extern __inline mve_pred16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
> @@ -3726,13 +3665,6 @@ __arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
>    return __builtin_mve_vbicq_uv16qi (__a, __b);
>  }
> 
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
> -{
> -  return __builtin_mve_vandq_uv16qi (__a, __b);
> -}
> -
>  __extension__ extern __inline uint32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
> @@ -4202,13 +4134,6 @@ __arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
>    return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
> -{
> -  return __builtin_mve_veorq_sv16qi (__a, __b);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
> @@ -4237,13 +4162,6 @@ __arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
>    return __builtin_mve_vbicq_sv16qi (__a, __b);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
> -{
> -  return __builtin_mve_vandq_sv16qi (__a, __b);
> -}
> -
>  __extension__ extern __inline int32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
> @@ -4419,13 +4337,6 @@ __arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
>    return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
>  }
> 
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
> -{
> -  return __builtin_mve_veorq_uv8hi (__a, __b);
> -}
> -
>  __extension__ extern __inline mve_pred16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
> @@ -4498,13 +4409,6 @@ __arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
>    return __builtin_mve_vbicq_uv8hi (__a, __b);
>  }
> 
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
> -{
> -  return __builtin_mve_vandq_uv8hi (__a, __b);
> -}
> -
>  __extension__ extern __inline uint32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
> @@ -4974,13 +4878,6 @@ __arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
>    return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
>  }
> 
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
> -{
> -  return __builtin_mve_veorq_sv8hi (__a, __b);
> -}
> -
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
> @@ -5009,13 +4906,6 @@ __arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
>    return __builtin_mve_vbicq_sv8hi (__a, __b);
>  }
> 
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
> -{
> -  return __builtin_mve_vandq_sv8hi (__a, __b);
> -}
> -
>  __extension__ extern __inline int32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
> @@ -5191,13 +5081,6 @@ __arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
>    return __builtin_mve_vhaddq_n_uv4si (__a, __b);
>  }
> 
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
> -{
> -  return __builtin_mve_veorq_uv4si (__a, __b);
> -}
> -
>  __extension__ extern __inline mve_pred16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
> @@ -5270,13 +5153,6 @@ __arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
>    return __builtin_mve_vbicq_uv4si (__a, __b);
>  }
> 
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
> -{
> -  return __builtin_mve_vandq_uv4si (__a, __b);
> -}
> -
>  __extension__ extern __inline uint32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
> @@ -5746,13 +5622,6 @@ __arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
>    return __builtin_mve_vhaddq_n_sv4si (__a, __b);
>  }
> 
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
> -{
> -  return __builtin_mve_veorq_sv4si (__a, __b);
> -}
> -
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
> @@ -5781,13 +5650,6 @@ __arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
>    return __builtin_mve_vbicq_sv4si (__a, __b);
>  }
> 
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
> -{
> -  return __builtin_mve_vandq_sv4si (__a, __b);
> -}
> -
>  __extension__ extern __inline int32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
> @@ -9175,48 +9037,6 @@ __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pr
>    return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> @@ -9385,48 +9205,6 @@ __arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b,
>    return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
> @@ -14285,48 +14063,6 @@ __arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
>    return __builtin_mve_vrmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> @@ -14411,48 +14147,6 @@ __arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
>    return __builtin_mve_vbrsrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
> -}
> -
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
> @@ -16300,13 +15994,6 @@ __arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
>    return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
> -{
> -  return __builtin_mve_veorq_fv8hf (__a, __b);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
> @@ -16356,13 +16043,6 @@ __arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
>    return __builtin_mve_vbicq_fv8hf (__a, __b);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
> -{
> -  return __builtin_mve_vandq_fv8hf (__a, __b);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
> @@ -16524,13 +16204,6 @@ __arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
>    return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
> -{
> -  return __builtin_mve_veorq_fv4sf (__a, __b);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
> @@ -16580,13 +16253,6 @@ __arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
>    return __builtin_mve_vbicq_fv4sf (__a, __b);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
> -{
> -  return __builtin_mve_vandq_fv4sf (__a, __b);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
> @@ -17372,20 +17038,6 @@ __arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve
>    return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> @@ -17582,20 +17234,6 @@ __arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm
>    return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
> @@ -18456,20 +18094,6 @@ __arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
>    return __builtin_mve_vrndxq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_vandq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> @@ -18498,20 +18122,6 @@ __arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
>    return __builtin_mve_vbrsrq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
> -}
> -
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> -  return __builtin_mve_veorq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> @@ -19428,13 +19038,6 @@ __arm_vhaddq (uint8x16_t __a, uint8_t __b)
>   return __arm_vhaddq_n_u8 (__a, __b);
>  }
> 
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (uint8x16_t __a, uint8x16_t __b)
> -{
> - return __arm_veorq_u8 (__a, __b);
> -}
> -
>  __extension__ extern __inline mve_pred16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmpneq (uint8x16_t __a, uint8_t __b)
> @@ -19505,13 +19108,6 @@ __arm_vbicq (uint8x16_t __a, uint8x16_t __b)
>   return __arm_vbicq_u8 (__a, __b);
>  }
> 
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (uint8x16_t __a, uint8x16_t __b)
> -{
> - return __arm_vandq_u8 (__a, __b);
> -}
> -
>  __extension__ extern __inline uint32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p)
> @@ -19981,13 +19577,6 @@ __arm_vhaddq (int8x16_t __a, int8_t __b)
>   return __arm_vhaddq_n_s8 (__a, __b);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (int8x16_t __a, int8x16_t __b)
> -{
> - return __arm_veorq_s8 (__a, __b);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b)
> @@ -20016,13 +19605,6 @@ __arm_vbicq (int8x16_t __a, int8x16_t __b)
>   return __arm_vbicq_s8 (__a, __b);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (int8x16_t __a, int8x16_t __b)
> -{
> - return __arm_vandq_s8 (__a, __b);
> -}
> -
>  __extension__ extern __inline int32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvaq (int32_t __a, int8x16_t __b)
> @@ -20198,13 +19780,6 @@ __arm_vhaddq (uint16x8_t __a, uint16_t __b)
>   return __arm_vhaddq_n_u16 (__a, __b);
>  }
> 
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (uint16x8_t __a, uint16x8_t __b)
> -{
> - return __arm_veorq_u16 (__a, __b);
> -}
> -
>  __extension__ extern __inline mve_pred16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmpneq (uint16x8_t __a, uint16_t __b)
> @@ -20275,13 +19850,6 @@ __arm_vbicq (uint16x8_t __a, uint16x8_t __b)
>   return __arm_vbicq_u16 (__a, __b);
>  }
> 
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (uint16x8_t __a, uint16x8_t __b)
> -{
> - return __arm_vandq_u16 (__a, __b);
> -}
> -
>  __extension__ extern __inline uint32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p)
> @@ -20751,13 +20319,6 @@ __arm_vhaddq (int16x8_t __a, int16_t __b)
>   return __arm_vhaddq_n_s16 (__a, __b);
>  }
> 
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (int16x8_t __a, int16x8_t __b)
> -{
> - return __arm_veorq_s16 (__a, __b);
> -}
> -
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b)
> @@ -20786,13 +20347,6 @@ __arm_vbicq (int16x8_t __a, int16x8_t __b)
>   return __arm_vbicq_s16 (__a, __b);
>  }
> 
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (int16x8_t __a, int16x8_t __b)
> -{
> - return __arm_vandq_s16 (__a, __b);
> -}
> -
>  __extension__ extern __inline int32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvaq (int32_t __a, int16x8_t __b)
> @@ -20968,13 +20522,6 @@ __arm_vhaddq (uint32x4_t __a, uint32_t __b)
>   return __arm_vhaddq_n_u32 (__a, __b);
>  }
> 
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (uint32x4_t __a, uint32x4_t __b)
> -{
> - return __arm_veorq_u32 (__a, __b);
> -}
> -
>  __extension__ extern __inline mve_pred16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmpneq (uint32x4_t __a, uint32_t __b)
> @@ -21045,13 +20592,6 @@ __arm_vbicq (uint32x4_t __a, uint32x4_t __b)
>   return __arm_vbicq_u32 (__a, __b);
>  }
> 
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (uint32x4_t __a, uint32x4_t __b)
> -{
> - return __arm_vandq_u32 (__a, __b);
> -}
> -
>  __extension__ extern __inline uint32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p)
> @@ -21521,13 +21061,6 @@ __arm_vhaddq (int32x4_t __a, int32_t __b)
>   return __arm_vhaddq_n_s32 (__a, __b);
>  }
> 
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (int32x4_t __a, int32x4_t __b)
> -{
> - return __arm_veorq_s32 (__a, __b);
> -}
> -
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b)
> @@ -21556,13 +21089,6 @@ __arm_vbicq (int32x4_t __a, int32x4_t __b)
>   return __arm_vbicq_s32 (__a, __b);
>  }
> 
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (int32x4_t __a, int32x4_t __b)
> -{
> - return __arm_vandq_s32 (__a, __b);
> -}
> -
>  __extension__ extern __inline int32_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vaddvaq (int32_t __a, int32x4_t __b)
> @@ -24909,48 +24435,6 @@ __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16
>   return __arm_vabdq_m_u16 (__inactive, __a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_s8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_s32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_s16 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_u8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_u32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_u16 (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> @@ -25119,48 +24603,6 @@ __arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve
>   return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_m_s8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_m_s32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b,
> mve_pred16_t __p)
> -{
> - return __arm_veorq_m_s16 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b,
> mve_pred16_t __p)
> -{
> - return __arm_veorq_m_u8 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b,
> mve_pred16_t __p)
> -{
> - return __arm_veorq_m_u32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b,
> mve_pred16_t __p)
> -{
> - return __arm_veorq_m_u16 (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
> @@ -29550,48 +28992,6 @@ __arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
>   return __arm_vrmulhq_x_u32 (__a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_s8 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_s16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_s32 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_u8 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_u16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_u32 (__a, __b, __p);
> -}
> -
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> @@ -29676,48 +29076,6 @@ __arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
>   return __arm_vbrsrq_x_n_u32 (__a, __b, __p);
>  }
> 
> -__extension__ extern __inline int8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_s8 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline int16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_s16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_s32 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint8x16_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_u8 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_u16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_u32 (__a, __b, __p);
> -}
> -
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p)
> @@ -31127,13 +30485,6 @@ __arm_vmaxnmaq (float16x8_t __a, float16x8_t __b)
>   return __arm_vmaxnmaq_f16 (__a, __b);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (float16x8_t __a, float16x8_t __b)
> -{
> - return __arm_veorq_f16 (__a, __b);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b)
> @@ -31183,13 +30534,6 @@ __arm_vbicq (float16x8_t __a, float16x8_t __b)
>   return __arm_vbicq_f16 (__a, __b);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (float16x8_t __a, float16x8_t __b)
> -{
> - return __arm_vandq_f16 (__a, __b);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vabdq (float16x8_t __a, float16x8_t __b)
> @@ -31351,13 +30695,6 @@ __arm_vmaxnmaq (float32x4_t __a, float32x4_t __b)
>   return __arm_vmaxnmaq_f32 (__a, __b);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq (float32x4_t __a, float32x4_t __b)
> -{
> - return __arm_veorq_f32 (__a, __b);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b)
> @@ -31407,13 +30744,6 @@ __arm_vbicq (float32x4_t __a, float32x4_t __b)
>   return __arm_vbicq_f32 (__a, __b);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq (float32x4_t __a, float32x4_t __b)
> -{
> - return __arm_vandq_f32 (__a, __b);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vabdq (float32x4_t __a, float32x4_t __b)
> @@ -32184,20 +31514,6 @@ __arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pre
>   return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_f32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_m_f16 (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> @@ -32394,20 +31710,6 @@ __arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_p
>   return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p);
>  }
> 
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_m_f32 (__inactive, __a, __b, __p);
> -}
> -
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_m_f16 (__inactive, __a, __b, __p);
> -}
> -
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
> @@ -33010,20 +32312,6 @@ __arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p)
>   return __arm_vrndxq_x_f32 (__a, __p);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_f16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_vandq_x_f32 (__a, __b, __p);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> @@ -33052,20 +32340,6 @@ __arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p)
>   return __arm_vbrsrq_x_n_f32 (__a, __b, __p);
>  }
> 
> -__extension__ extern __inline float16x8_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_f16 (__a, __b, __p);
> -}
> -
> -__extension__ extern __inline float32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
> -{
> - return __arm_veorq_x_f32 (__a, __b, __p);
> -}
> -
>  __extension__ extern __inline float16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
> @@ -33678,18 +32952,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
>    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
> 
> -#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
> -  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
> -  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
> -
>  #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> @@ -33868,18 +33130,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
>    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
> 
> -#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
> -  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
> -  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
> -
>  #define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> @@ -35060,19 +34310,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
>    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
> 
> -#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
> -  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
> -  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
> -
>  #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
> @@ -35180,19 +34417,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16(__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
>    int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32(__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
> 
> -#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
> -  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
> -  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
> -
>  #define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
> @@ -35588,18 +34812,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
>    int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
> 
> -#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8  (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
> -  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
> -  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
> -
>  #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> @@ -35679,18 +34891,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
>    int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
> 
> -#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
> -  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
> -  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
> -
>  #define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> @@ -36251,16 +35451,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
>    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
> 
> -#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
> -
>  #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> @@ -36304,16 +35494,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
>    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
> 
> -#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
> -
>  #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> @@ -36998,17 +36178,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
>    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> 
> -#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> -
>  #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
> @@ -37053,17 +36222,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
>    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> 
> -#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> -
>  #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
> @@ -37360,16 +36518,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
>    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> 
> -#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> -
>  #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
>    int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
> @@ -37478,16 +36626,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
>    int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> 
> -#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8  (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
> -  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
> -  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
> -  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
> -
>  #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> --
> 2.34.1
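
For reference, none of the removals above change the user-facing API: the
overloaded and type-suffixed vandq/veorq forms are now resolved through the
builtins framework instead of the _Generic tables and inline wrappers deleted
here.  A minimal smoke test (not part of the patch; the function name is made
up for illustration, and it assumes an MVE-enabled toolchain, e.g.
-mcpu=cortex-m55):

  #include <arm_mve.h>

  int32x4_t
  and_xor_example (int32x4_t a, int32x4_t b, mve_pred16_t p)
  {
    int32x4_t c = vandq (a, b);          /* overload resolves to vandq_s32 */
    int32x4_t d = veorq_m (c, a, b, p);  /* predicated, merging variant */
    return veorq_x (c, d, p);            /* predicated, "don't care" variant */
  }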

Patch

diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc
index 48b09bffd0c..51fed8f671f 100644
--- a/gcc/config/arm/arm-mve-builtins-base.cc
+++ b/gcc/config/arm/arm-mve-builtins-base.cc
@@ -90,7 +90,17 @@  namespace arm_mve {
     UNSPEC##_M_S, UNSPEC##_M_U, UNSPEC##_M_F,				\
     UNSPEC##_M_N_S, UNSPEC##_M_N_U, UNSPEC##_M_N_F))
 
+  /* Helper for builtins with RTX codes, and _m predicated overrides.  */
+#define FUNCTION_WITH_RTX_M(NAME, RTX, UNSPEC) FUNCTION			\
+  (NAME, unspec_based_mve_function_exact_insn,				\
+   (RTX, RTX, RTX,							\
+    -1, -1, -1,								\
+    UNSPEC##_M_S, UNSPEC##_M_U, UNSPEC##_M_F,				\
+    -1, -1, -1))
+
 FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ)
+FUNCTION_WITH_RTX_M (vandq, AND, VANDQ)
+FUNCTION_WITH_RTX_M (veorq, XOR, VEORQ)
 FUNCTION_WITH_RTX_M_N (vmulq, MULT, VMULQ)
 FUNCTION (vreinterpretq, vreinterpretq_impl,)
 FUNCTION_WITH_RTX_M_N (vsubq, MINUS, VSUBQ)
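
(To illustrate, not part of the patch: per the helper macro above,
FUNCTION_WITH_RTX_M (vandq, AND, VANDQ) expands to

  FUNCTION (vandq, unspec_based_mve_function_exact_insn,
            (AND, AND, AND, -1, -1, -1,
             VANDQ_M_S, VANDQ_M_U, VANDQ_M_F,
             -1, -1, -1))

i.e. the unpredicated signed/unsigned/float forms map directly to the AND
RTX code, there are no _n immediate forms (-1), and the predicated _m forms
map to the VANDQ_M_{S,U,F} unspecs.)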
diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def
index 624558c08b2..a933c9fc91e 100644
--- a/gcc/config/arm/arm-mve-builtins-base.def
+++ b/gcc/config/arm/arm-mve-builtins-base.def
@@ -19,6 +19,8 @@ 
 
 #define REQUIRES_FLOAT false
 DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_integer, mx_or_none)
+DEF_MVE_FUNCTION (vandq, binary, all_integer, mx_or_none)
+DEF_MVE_FUNCTION (veorq, binary, all_integer, mx_or_none)
 DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_integer, mx_or_none)
 DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_integer, none)
 DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_integer, mx_or_none)
@@ -27,6 +29,8 @@  DEF_MVE_FUNCTION (vuninitializedq, inherent, all_integer_with_64, none)
 
 #define REQUIRES_FLOAT true
 DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_float, mx_or_none)
+DEF_MVE_FUNCTION (vandq, binary, all_float, mx_or_none)
+DEF_MVE_FUNCTION (veorq, binary, all_float, mx_or_none)
 DEF_MVE_FUNCTION (vmulq, binary_opt_n, all_float, mx_or_none)
 DEF_MVE_FUNCTION (vreinterpretq, unary_convert, reinterpret_float, none)
 DEF_MVE_FUNCTION (vsubq, binary_opt_n, all_float, mx_or_none)
diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h
index 30f8549c495..4fcf55715b6 100644
--- a/gcc/config/arm/arm-mve-builtins-base.h
+++ b/gcc/config/arm/arm-mve-builtins-base.h
@@ -24,6 +24,8 @@  namespace arm_mve {
 namespace functions {
 
 extern const function_base *const vaddq;
+extern const function_base *const vandq;
+extern const function_base *const veorq;
 extern const function_base *const vmulq;
 extern const function_base *const vreinterpretq;
 extern const function_base *const vsubq;
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 42a1af2ae15..0ad0122e44f 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -77,14 +77,12 @@ 
 #define vmaxq(__a, __b) __arm_vmaxq(__a, __b)
 #define vhsubq(__a, __b) __arm_vhsubq(__a, __b)
 #define vhaddq(__a, __b) __arm_vhaddq(__a, __b)
-#define veorq(__a, __b) __arm_veorq(__a, __b)
 #define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b)
 #define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b)
 #define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b)
 #define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b)
 #define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b)
 #define vbicq(__a, __b) __arm_vbicq(__a, __b)
-#define vandq(__a, __b) __arm_vandq(__a, __b)
 #define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p)
 #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
 #define vabdq(__a, __b) __arm_vabdq(__a, __b)
@@ -236,12 +234,10 @@ 
 #define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p)
 #define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p)
 #define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p)
-#define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p)
 #define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p)
 #define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p)
 #define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p)
 #define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p)
-#define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p)
 #define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p)
 #define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p)
 #define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p)
@@ -404,10 +400,8 @@ 
 #define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p)
 #define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p)
 #define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p)
-#define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p)
 #define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p)
 #define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p)
-#define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p)
 #define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p)
 #define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p)
 #define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p)
@@ -702,7 +696,6 @@ 
 #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b)
 #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b)
 #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b)
-#define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b)
 #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b)
 #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b)
 #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b)
@@ -713,7 +706,6 @@ 
 #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b)
 #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b)
 #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b)
-#define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
 #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
 #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
 #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
@@ -781,12 +773,10 @@ 
 #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b)
 #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b)
 #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b)
-#define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b)
 #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b)
 #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b)
 #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b)
 #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
-#define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
 #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
 #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
 #define vshlq_n_s8(__a,  __imm) __arm_vshlq_n_s8(__a,  __imm)
@@ -812,7 +802,6 @@ 
 #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b)
 #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b)
 #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b)
-#define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b)
 #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b)
 #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b)
 #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b)
@@ -823,7 +812,6 @@ 
 #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b)
 #define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
 #define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
-#define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
 #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
 #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
 #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
@@ -891,12 +879,10 @@ 
 #define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
 #define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
 #define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
-#define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
 #define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
 #define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
 #define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
 #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
-#define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
 #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
 #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
 #define vshlq_n_s16(__a,  __imm) __arm_vshlq_n_s16(__a,  __imm)
@@ -922,7 +908,6 @@ 
 #define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
 #define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
 #define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
-#define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
 #define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
 #define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
 #define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
@@ -933,7 +918,6 @@ 
 #define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
 #define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
 #define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
-#define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
 #define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
 #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
 #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
@@ -1001,12 +985,10 @@ 
 #define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
 #define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
 #define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
-#define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
 #define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
 #define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
 #define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
 #define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
-#define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
 #define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
 #define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
 #define vshlq_n_s32(__a,  __imm) __arm_vshlq_n_s32(__a,  __imm)
@@ -1059,7 +1041,6 @@ 
 #define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
 #define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
 #define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
-#define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
 #define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
 #define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
 #define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
@@ -1067,7 +1048,6 @@ 
 #define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
 #define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
 #define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
-#define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
 #define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
 #define vshlltq_n_s8(__a,  __imm) __arm_vshlltq_n_s8(__a,  __imm)
 #define vshllbq_n_s8(__a,  __imm) __arm_vshllbq_n_s8(__a,  __imm)
@@ -1120,7 +1100,6 @@ 
 #define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
 #define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
 #define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
-#define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
 #define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
 #define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
 #define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
@@ -1128,7 +1107,6 @@ 
 #define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
 #define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
 #define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
-#define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
 #define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
 #define vshlltq_n_s16(__a,  __imm) __arm_vshlltq_n_s16(__a,  __imm)
 #define vshllbq_n_s16(__a,  __imm) __arm_vshllbq_n_s16(__a,  __imm)
@@ -1662,12 +1640,6 @@ 
 #define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
 #define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
 #define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
-#define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
-#define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
-#define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
-#define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
-#define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
-#define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
 #define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
 #define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
 #define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
@@ -1692,12 +1664,6 @@ 
 #define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
 #define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
 #define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
-#define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
-#define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
-#define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
-#define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
-#define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
-#define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
 #define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
 #define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
 #define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
@@ -2006,8 +1972,6 @@ 
 #define vshrntq_m_n_u16(__a, __b,  __imm, __p) __arm_vshrntq_m_n_u16(__a, __b,  __imm, __p)
 #define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
 #define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
-#define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
-#define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
 #define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
 #define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
 #define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
@@ -2036,8 +2000,6 @@ 
 #define vcvtq_m_n_s16_f16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a,  __imm6, __p)
 #define vcvtq_m_n_u32_f32(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a,  __imm6, __p)
 #define vcvtq_m_n_u16_f16(__inactive, __a,  __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a,  __imm6, __p)
-#define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
-#define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
 #define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
 #define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
 #define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
@@ -2467,12 +2429,6 @@ 
 #define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
 #define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
 #define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
-#define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
-#define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
-#define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
-#define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
-#define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
-#define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
 #define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
 #define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
 #define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
@@ -2485,12 +2441,6 @@ 
 #define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p)
 #define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p)
 #define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p)
-#define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p)
-#define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p)
-#define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p)
-#define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p)
-#define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p)
-#define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p)
 #define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p)
 #define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p)
 #define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p)
@@ -2641,14 +2591,10 @@ 
 #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p)
 #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p)
 #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p)
-#define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p)
-#define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p)
 #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p)
 #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p)
 #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p)
 #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p)
-#define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p)
-#define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p)
 #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p)
 #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p)
 #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p)
@@ -3647,13 +3593,6 @@  __arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b)
   return __builtin_mve_vhaddq_n_uv16qi (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
-  return __builtin_mve_veorq_uv16qi (__a, __b);
-}
-
 __extension__ extern __inline mve_pred16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
@@ -3726,13 +3665,6 @@  __arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
   return __builtin_mve_vbicq_uv16qi (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
-  return __builtin_mve_vandq_uv16qi (__a, __b);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
@@ -4202,13 +4134,6 @@  __arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
   return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
-{
-  return __builtin_mve_veorq_sv16qi (__a, __b);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
@@ -4237,13 +4162,6 @@  __arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
   return __builtin_mve_vbicq_sv16qi (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
-{
-  return __builtin_mve_vandq_sv16qi (__a, __b);
-}
-
 __extension__ extern __inline int32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
@@ -4419,13 +4337,6 @@  __arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
   return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
-  return __builtin_mve_veorq_uv8hi (__a, __b);
-}
-
 __extension__ extern __inline mve_pred16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
@@ -4498,13 +4409,6 @@  __arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
   return __builtin_mve_vbicq_uv8hi (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
-  return __builtin_mve_vandq_uv8hi (__a, __b);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
@@ -4974,13 +4878,6 @@  __arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
   return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
-{
-  return __builtin_mve_veorq_sv8hi (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
@@ -5009,13 +4906,6 @@  __arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
   return __builtin_mve_vbicq_sv8hi (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
-{
-  return __builtin_mve_vandq_sv8hi (__a, __b);
-}
-
 __extension__ extern __inline int32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
@@ -5191,13 +5081,6 @@  __arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
   return __builtin_mve_vhaddq_n_uv4si (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
-  return __builtin_mve_veorq_uv4si (__a, __b);
-}
-
 __extension__ extern __inline mve_pred16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
@@ -5270,13 +5153,6 @@  __arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
   return __builtin_mve_vbicq_uv4si (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
-  return __builtin_mve_vandq_uv4si (__a, __b);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
@@ -5746,13 +5622,6 @@  __arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
   return __builtin_mve_vhaddq_n_sv4si (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
-{
-  return __builtin_mve_veorq_sv4si (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
@@ -5781,13 +5650,6 @@  __arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
   return __builtin_mve_vbicq_sv4si (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
-{
-  return __builtin_mve_vandq_sv4si (__a, __b);
-}
-
 __extension__ extern __inline int32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
@@ -9175,48 +9037,6 @@  __arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pr
   return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -9385,48 +9205,6 @@  __arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b,
   return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
@@ -14285,48 +14063,6 @@  __arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
   return __builtin_mve_vrmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -14411,48 +14147,6 @@  __arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
   return __builtin_mve_vbrsrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
@@ -16300,13 +15994,6 @@  __arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
   return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
-{
-  return __builtin_mve_veorq_fv8hf (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
@@ -16356,13 +16043,6 @@  __arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
   return __builtin_mve_vbicq_fv8hf (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
-{
-  return __builtin_mve_vandq_fv8hf (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
@@ -16524,13 +16204,6 @@  __arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
   return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
-{
-  return __builtin_mve_veorq_fv4sf (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
@@ -16580,13 +16253,6 @@  __arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
   return __builtin_mve_vbicq_fv4sf (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
-{
-  return __builtin_mve_vandq_fv4sf (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
@@ -17372,20 +17038,6 @@  __arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve
   return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
@@ -17582,20 +17234,6 @@  __arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm
   return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
@@ -18456,20 +18094,6 @@  __arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
   return __builtin_mve_vrndxq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_vandq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
@@ -18498,20 +18122,6 @@  __arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
   return __builtin_mve_vbrsrq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
-  return __builtin_mve_veorq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
@@ -19428,13 +19038,6 @@  __arm_vhaddq (uint8x16_t __a, uint8_t __b)
  return __arm_vhaddq_n_u8 (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (uint8x16_t __a, uint8x16_t __b)
-{
- return __arm_veorq_u8 (__a, __b);
-}
-
 __extension__ extern __inline mve_pred16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmpneq (uint8x16_t __a, uint8_t __b)
@@ -19505,13 +19108,6 @@  __arm_vbicq (uint8x16_t __a, uint8x16_t __b)
  return __arm_vbicq_u8 (__a, __b);
 }
 
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (uint8x16_t __a, uint8x16_t __b)
-{
- return __arm_vandq_u8 (__a, __b);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p)
@@ -19981,13 +19577,6 @@  __arm_vhaddq (int8x16_t __a, int8_t __b)
  return __arm_vhaddq_n_s8 (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_veorq_s8 (__a, __b);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b)
@@ -20016,13 +19605,6 @@  __arm_vbicq (int8x16_t __a, int8x16_t __b)
  return __arm_vbicq_s8 (__a, __b);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (int8x16_t __a, int8x16_t __b)
-{
- return __arm_vandq_s8 (__a, __b);
-}
-
 __extension__ extern __inline int32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvaq (int32_t __a, int8x16_t __b)
@@ -20198,13 +19780,6 @@  __arm_vhaddq (uint16x8_t __a, uint16_t __b)
  return __arm_vhaddq_n_u16 (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (uint16x8_t __a, uint16x8_t __b)
-{
- return __arm_veorq_u16 (__a, __b);
-}
-
 __extension__ extern __inline mve_pred16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmpneq (uint16x8_t __a, uint16_t __b)
@@ -20275,13 +19850,6 @@  __arm_vbicq (uint16x8_t __a, uint16x8_t __b)
  return __arm_vbicq_u16 (__a, __b);
 }
 
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (uint16x8_t __a, uint16x8_t __b)
-{
- return __arm_vandq_u16 (__a, __b);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p)
@@ -20751,13 +20319,6 @@  __arm_vhaddq (int16x8_t __a, int16_t __b)
  return __arm_vhaddq_n_s16 (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_veorq_s16 (__a, __b);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b)
@@ -20786,13 +20347,6 @@  __arm_vbicq (int16x8_t __a, int16x8_t __b)
  return __arm_vbicq_s16 (__a, __b);
 }
 
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (int16x8_t __a, int16x8_t __b)
-{
- return __arm_vandq_s16 (__a, __b);
-}
-
 __extension__ extern __inline int32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvaq (int32_t __a, int16x8_t __b)
@@ -20968,13 +20522,6 @@  __arm_vhaddq (uint32x4_t __a, uint32_t __b)
  return __arm_vhaddq_n_u32 (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (uint32x4_t __a, uint32x4_t __b)
-{
- return __arm_veorq_u32 (__a, __b);
-}
-
 __extension__ extern __inline mve_pred16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmpneq (uint32x4_t __a, uint32_t __b)
@@ -21045,13 +20592,6 @@  __arm_vbicq (uint32x4_t __a, uint32x4_t __b)
  return __arm_vbicq_u32 (__a, __b);
 }
 
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (uint32x4_t __a, uint32x4_t __b)
-{
- return __arm_vandq_u32 (__a, __b);
-}
-
 __extension__ extern __inline uint32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p)
@@ -21521,13 +21061,6 @@  __arm_vhaddq (int32x4_t __a, int32_t __b)
  return __arm_vhaddq_n_s32 (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_veorq_s32 (__a, __b);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b)
@@ -21556,13 +21089,6 @@  __arm_vbicq (int32x4_t __a, int32x4_t __b)
  return __arm_vbicq_s32 (__a, __b);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (int32x4_t __a, int32x4_t __b)
-{
- return __arm_vandq_s32 (__a, __b);
-}
-
 __extension__ extern __inline int32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vaddvaq (int32_t __a, int32x4_t __b)
@@ -24909,48 +24435,6 @@  __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16
  return __arm_vabdq_m_u16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_u16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -25119,48 +24603,6 @@  __arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve
  return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_s8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_s32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_s16 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_u8 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_u32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_u16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
@@ -29550,48 +28992,6 @@  __arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
  return __arm_vrmulhq_x_u32 (__a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_u32 (__a, __b, __p);
-}
-
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
@@ -29676,48 +29076,6 @@  __arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
  return __arm_vbrsrq_x_n_u32 (__a, __b, __p);
 }
 
-__extension__ extern __inline int8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_s8 (__a, __b, __p);
-}
-
-__extension__ extern __inline int16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_s16 (__a, __b, __p);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_s32 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint8x16_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_u8 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_u16 (__a, __b, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_u32 (__a, __b, __p);
-}
-
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p)
@@ -31127,13 +30485,6 @@  __arm_vmaxnmaq (float16x8_t __a, float16x8_t __b)
  return __arm_vmaxnmaq_f16 (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (float16x8_t __a, float16x8_t __b)
-{
- return __arm_veorq_f16 (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b)
@@ -31183,13 +30534,6 @@  __arm_vbicq (float16x8_t __a, float16x8_t __b)
  return __arm_vbicq_f16 (__a, __b);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (float16x8_t __a, float16x8_t __b)
-{
- return __arm_vandq_f16 (__a, __b);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (float16x8_t __a, float16x8_t __b)
@@ -31351,13 +30695,6 @@  __arm_vmaxnmaq (float32x4_t __a, float32x4_t __b)
  return __arm_vmaxnmaq_f32 (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq (float32x4_t __a, float32x4_t __b)
-{
- return __arm_veorq_f32 (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b)
@@ -31407,13 +30744,6 @@  __arm_vbicq (float32x4_t __a, float32x4_t __b)
  return __arm_vbicq_f32 (__a, __b);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq (float32x4_t __a, float32x4_t __b)
-{
- return __arm_vandq_f32 (__a, __b);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vabdq (float32x4_t __a, float32x4_t __b)
@@ -32184,20 +31514,6 @@  __arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pre
  return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_m_f16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
@@ -32394,20 +31710,6 @@  __arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_p
  return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p);
 }
 
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_f32 (__inactive, __a, __b, __p);
-}
-
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_m_f16 (__inactive, __a, __b, __p);
-}
-
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
@@ -33010,20 +32312,6 @@  __arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p)
  return __arm_vrndxq_x_f32 (__a, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_vandq_x_f32 (__a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
@@ -33052,20 +32340,6 @@  __arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p)
  return __arm_vbrsrq_x_n_f32 (__a, __b, __p);
 }
 
-__extension__ extern __inline float16x8_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_f16 (__a, __b, __p);
-}
-
-__extension__ extern __inline float32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
-{
- return __arm_veorq_x_f32 (__a, __b, __p);
-}
-
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
@@ -33678,18 +32952,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
 
-#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
-
 #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -33868,18 +33130,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
 
-#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
-
 #define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -35060,19 +34310,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
 
-#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
-
 #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -35180,19 +34417,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16(__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32(__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
 
-#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
-
 #define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -35588,18 +34812,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
   int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
 
-#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8  (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
-
 #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -35679,18 +34891,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
 
-#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
-  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
-  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
-
 #define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -36251,16 +35451,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
 
-#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
-
 #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -36304,16 +35494,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
 
-#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
-
 #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
@@ -36998,17 +36178,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
 
-#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
 #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -37053,17 +36222,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
 
-#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
 #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
@@ -37360,16 +36518,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
 
-#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
 #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
   int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
@@ -37478,16 +36626,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
 
-#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8  (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
-  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
-  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
-  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
-
 #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \