Patch Detail
get:
Show a patch.
patch:
Update a patch (partial update: only the fields supplied in the request body are changed).
put:
Update a patch (full update of the writable fields).
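These endpoints can also be driven programmatically. The snippet below is a minimal sketch using only the Python 3 standard library; the host, the patch ID, and the field names it reads (name, state, mbox) are taken from the GET request and JSON response shown below, and it assumes a read-only GET needs no authentication (updating a patch via PATCH or PUT would additionally require an API token). The ?format=api suffix in the request below selects the browsable HTML rendering; omitting it should return plain JSON, which is what the sketch relies on.

    import json
    import urllib.request

    BASE = "http://patchwork.ozlabs.org/api"

    def get_patch(patch_id):
        """Fetch one patch object, mirroring GET /api/patches/{id}/."""
        with urllib.request.urlopen(f"{BASE}/patches/{patch_id}/") as resp:
            return json.load(resp)

    patch = get_patch(2237927)
    print(patch["state"])   # e.g. "new"
    print(patch["name"])    # "[v1,2/6] aarch64: Port NEON add intrinsics ..."
    print(patch["mbox"])    # URL of the raw mbox, suitable for `git am`

The concrete request and its raw JSON response follow.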
GET /api/patches/2237927/?format=api
{ "id": 2237927, "url": "http://patchwork.ozlabs.org/api/patches/2237927/?format=api", "web_url": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hihq3sdm4a.gcc.gcc-TEST.karmea01.158.1.2@forge-stage.sourceware.org/", "project": { "id": 17, "url": "http://patchwork.ozlabs.org/api/projects/17/?format=api", "name": "GNU Compiler Collection", "link_name": "gcc", "list_id": "gcc-patches.gcc.gnu.org", "list_email": "gcc-patches@gcc.gnu.org", "web_url": null, "scm_url": null, "webscm_url": null, "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<bmm.hihq3sdm4a.gcc.gcc-TEST.karmea01.158.1.2@forge-stage.sourceware.org>", "list_archive_url": null, "date": "2026-05-13T16:04:12", "name": "[v1,2/6] aarch64: Port NEON add intrinsics to pragma-based framework", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "0e9e7a65d204b57d9bb6e1c0840998f54663e582", "submitter": { "id": 92188, "url": "http://patchwork.ozlabs.org/api/people/92188/?format=api", "name": "Karl Meakin via Sourceware Forge", "email": "forge-bot+karmea01@forge-stage.sourceware.org" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hihq3sdm4a.gcc.gcc-TEST.karmea01.158.1.2@forge-stage.sourceware.org/mbox/", "series": [ { "id": 504183, "url": "http://patchwork.ozlabs.org/api/series/504183/?format=api", "web_url": "http://patchwork.ozlabs.org/project/gcc/list/?series=504183", "date": "2026-05-13T16:04:10", "name": "aarch64: port NEON intrinsics to pragma-based framework", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/504183/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/2237927/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/2237927/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "gcc-patches@gcc.gnu.org" ], "Delivered-To": [ "patchwork-incoming@legolas.ozlabs.org", "gcc-patches@gcc.gnu.org" ], "Authentication-Results": [ "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=gcc.gnu.org\n (client-ip=2620:52:6:3111::32; helo=vm01.sourceware.org;\n envelope-from=gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org;\n receiver=patchwork.ozlabs.org)", "sourceware.org; dmarc=none (p=none dis=none)\n header.from=forge-stage.sourceware.org", "sourceware.org;\n spf=pass smtp.mailfrom=forge-stage.sourceware.org", "sourceware.org;\n arc=none smtp.remote-ip=2620:52:6:3111::39" ], "Received": [ "from vm01.sourceware.org (vm01.sourceware.org\n [IPv6:2620:52:6:3111::32])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4gFyzV1Npxz1y5L\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 14 May 2026 02:07:50 +1000 (AEST)", "from vm01.sourceware.org (localhost [IPv6:::1])\n\tby sourceware.org (Postfix) with ESMTP id 4BEF14BBC0A0\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 13 May 2026 16:07:48 +0000 (GMT)", "from forge-stage.sourceware.org (vm08.sourceware.org\n [IPv6:2620:52:6:3111::39])\n by sourceware.org (Postfix) with ESMTPS id 96DBD4BB8F64\n for <gcc-patches@gcc.gnu.org>; Wed, 13 May 2026 16:05:10 +0000 (GMT)", "from forge-stage.sourceware.org (localhost [IPv6:::1])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange x25519 
server-signature ECDSA (prime256v1) server-digest SHA256)\n (No client certificate requested)\n by forge-stage.sourceware.org (Postfix) with ESMTPS id 6431442D16;\n Wed, 13 May 2026 16:05:10 +0000 (UTC)" ], "DKIM-Filter": [ "OpenDKIM Filter v2.11.0 sourceware.org 4BEF14BBC0A0", "OpenDKIM Filter v2.11.0 sourceware.org 96DBD4BB8F64" ], "DMARC-Filter": "OpenDMARC Filter v1.4.2 sourceware.org 96DBD4BB8F64", "ARC-Filter": "OpenARC Filter v1.0.0 sourceware.org 96DBD4BB8F64", "ARC-Seal": "i=1; a=rsa-sha256; d=sourceware.org; s=key; t=1778688310; cv=none;\n b=E4B6Cd6grKvekoS6yUscyQcej3z4erVulo4PI7XTY/b2c5QpMUQuZM2o+NpJf+byRcfzhAcHH8l77/4FwK3p/lW8tW0JYpjiiGWYDMIPAwxWMhcuimvHfO+Zu7TAAh0+fC3hVI73SsRPe7ipwHjvFTkwDuL9eUqypHuenjbXMWk=", "ARC-Message-Signature": "i=1; a=rsa-sha256; d=sourceware.org; s=key;\n t=1778688310; c=relaxed/simple;\n bh=bpTaIdc12ZuAvzNV1tDpTwS8Wqntmp6f3WYKc2PH6y4=;\n h=From:Date:Subject:To:Message-ID;\n b=qL3Q37HGOlBBN7Wdyw0t2bS7/2wv8s1CBYkpTUUalR5UDZJrSxGUmYoa8C6omdlHrWrWSHIrc4jEe5OHFB3mZvRVQ1YO9wR5SjDXWnHVeBolF7SBjkJ4embCePDwxrSXIR7DTg1tDhOGU/RUl947eDV9G9TpRr/JCIqqp1riwXc=", "ARC-Authentication-Results": "i=1; sourceware.org", "From": "Karl Meakin via Sourceware Forge\n <forge-bot+karmea01@forge-stage.sourceware.org>", "Date": "Wed, 13 May 2026 16:04:12 +0000", "Subject": "[PATCH v1 2/6] aarch64: Port NEON add intrinsics to pragma-based\n framework", "To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>", "Cc": "ktkachov@nvidia.com, richard.earnshaw@arm.com, tamar.christina@arm.com,\n karl.meakin@arm.com", "Message-ID": "\n <bmm.hihq3sdm4a.gcc.gcc-TEST.karmea01.158.1.2@forge-stage.sourceware.org>", "X-Mailer": "batrachomyomachia", "X-Pull-Request-Organization": "gcc", "X-Pull-Request-Repository": "gcc-TEST", "X-Pull-Request": "https://forge.sourceware.org/gcc/gcc-TEST/pulls/158", "References": "\n <bmm.hihq3sdm4a.gcc.gcc-TEST.karmea01.158.1.0@forge-stage.sourceware.org>", "In-Reply-To": "\n <bmm.hihq3sdm4a.gcc.gcc-TEST.karmea01.158.1.0@forge-stage.sourceware.org>", "X-Patch-URL": "\n https://forge.sourceware.org/karmea01/gcc-TEST/commit/bab47f9ce02b86b8ffae8cc26176efbd9f378eaa", "X-BeenThere": "gcc-patches@gcc.gnu.org", "X-Mailman-Version": "2.1.30", "Precedence": "list", "List-Id": "Gcc-patches mailing list <gcc-patches.gcc.gnu.org>", "List-Unsubscribe": "<https://gcc.gnu.org/mailman/options/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=unsubscribe>", "List-Archive": "<https://gcc.gnu.org/pipermail/gcc-patches/>", "List-Post": "<mailto:gcc-patches@gcc.gnu.org>", "List-Help": "<mailto:gcc-patches-request@gcc.gnu.org?subject=help>", "List-Subscribe": "<https://gcc.gnu.org/mailman/listinfo/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=subscribe>", "Reply-To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>,\n ktkachov@nvidia.com, richard.earnshaw@arm.com, tamar.christina@arm.com,\n karl.meakin@arm.com, karmea01@sourceware.org", "Errors-To": "gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org" }, "content": "From: Karl Meakin <karl.meakin@arm.com>\n\nAdd all the necessary infrastructure for defining NEON intrinsics using the pragma-based framework,\nand port the `vadd` family of functions to demonstrate that it works.\n\ngcc/ChangeLog:\n\n\t* config/aarch64/aarch64-neon-builtins-base.cc: New file.\n\t* config/aarch64/aarch64-neon-builtins-base.def: New file.\n\t* config/aarch64/aarch64-neon-builtins-base.h: New file.\n\t* config/aarch64/aarch64-neon-builtins-functions.h: New file.\n\t* 
config/aarch64/aarch64-neon-builtins-shapes.cc: New file.\n\t* config/aarch64/aarch64-neon-builtins-shapes.h: New file.\n\t* config/aarch64/aarch64-neon-builtins.cc: New file.\n\t* config/aarch64/aarch64-neon-builtins.def: New file.\n\t* config/aarch64/aarch64-neon-builtins.h: New file.\n\t* config.gcc (extra_headers, extra_objs): Add new files and reformat for readability.\n\t* config/aarch64/t-aarch64: Add recipes for new files.\n\t* config/aarch64/aarch64-protos.h (handle_arm_neon_h): Rename to `init_arm_neon_builtins`.\n\t* config/aarch64/aarch64-builtins.cc (handle_arm_neon_h): Likewise.\n\t* config/aarch64/aarch64-c.cc (aarch64_pragma_aarch64): Call\n\t`aarch64_acle::handle_arm_neon_h`.\n\t* config/aarch64/aarch64-sve-builtins.cc (TYPES_*): Move to aarch64-acle-builtins.h\n\t(NONSTREAMING_SVE, SVE_AND_SME, SSVE): Likewise.\n\t* config/aarch64/aarch64-sve-builtins-shapes.cc (build_all): Remove `static` qualifier.\n\t(gimple_folder::fold): Allow folding when `TARGET_SVE` is false but `TARGET_SIMD` is true.\n\t* config/aarch64/aarch64-sve-builtins.def (p8, p16, p64, 128): New type suffixes.\n\t* config/aarch64/aarch64-acle-builtins.h (enum handle_pragma_index): New enum member\n\t`arm_neon_handle`.\n\t(enum type_class_index): New enum member `TYPE_poly`.\n\t(build_all): New declaration, so it can be used from `aarch64-neon-builtins.cc`.\n\t(TYPES_*): Moved from `aarch64-sve-builtins.cc`.\n\t(NONSTREAMING_SVE, SVE_AND_SME, SSVE): Likewise.\n\t* config/aarch64/arm_neon.h (vadd_s8, vadd_s16, vadd_s32, vadd_f32, vadd_f64, vadd_u8,\n\tvadd_u16, vadd_u32, vadd_s64, vadd_u64, vaddq_s8, vaddq_s16, vaddq_s32, vaddq_s64,\n\tvaddq_f32, vaddq_f64, vaddq_u8, vaddq_u16, vaddq_u32, vaddq_u64, vadd_f16, vaddq_f16,\n\tvadd_p8, vadd_p16, vadd_p64, vaddq_p8, vaddq_p16, vaddq_p64, vaddq_p128, vaddd_u64,\n\tvaddd_s64): Delete function definitions.\n\ngcc/testsuite/ChangeLog:\n\n\t* g++.target/aarch64/pr103147-6.C: Fix tests.\n\t* g++.target/aarch64/pr117048.C: Fix tests.\n\t* gcc.target/aarch64/pr103147-6.c: Fix tests.\n\t* gcc.target/aarch64/neon/aarch64-neon.exp: New test.\n\t* gcc.target/aarch64/neon/arm_neon_test.h: New test.\n\t* gcc.target/aarch64/neon/vadd.c: New test.\n---\n gcc/config.gcc | 20 +-\n gcc/config/aarch64/aarch64-acle-builtins.h | 826 +++++++++++++++++\n gcc/config/aarch64/aarch64-builtins.cc | 12 +-\n gcc/config/aarch64/aarch64-c.cc | 3 +-\n .../aarch64/aarch64-neon-builtins-base.cc | 113 +++\n .../aarch64/aarch64-neon-builtins-base.def | 33 +\n .../aarch64/aarch64-neon-builtins-base.h | 29 +\n .../aarch64/aarch64-neon-builtins-functions.h | 29 +\n .../aarch64/aarch64-neon-builtins-shapes.cc | 69 ++\n .../aarch64/aarch64-neon-builtins-shapes.h | 29 +\n gcc/config/aarch64/aarch64-neon-builtins.cc | 86 ++\n gcc/config/aarch64/aarch64-neon-builtins.def | 40 +\n gcc/config/aarch64/aarch64-neon-builtins.h | 28 +\n gcc/config/aarch64/aarch64-protos.h | 2 +-\n .../aarch64/aarch64-sve-builtins-shapes.cc | 16 +-\n gcc/config/aarch64/aarch64-sve-builtins.cc | 855 +-----------------\n gcc/config/aarch64/aarch64-sve-builtins.def | 11 +\n gcc/config/aarch64/arm_neon.h | 204 -----\n gcc/config/aarch64/t-aarch64 | 46 +\n gcc/testsuite/g++.target/aarch64/pr103147-6.C | 1 +\n gcc/testsuite/g++.target/aarch64/pr117048.C | 2 +-\n .../gcc.target/aarch64/neon/aarch64-neon.exp | 39 +\n .../gcc.target/aarch64/neon/arm_neon_test.h | 22 +\n gcc/testsuite/gcc.target/aarch64/neon/vadd.c | 203 +++++\n gcc/testsuite/gcc.target/aarch64/pr103147-6.c | 1 +\n 25 files changed, 1688 insertions(+), 1031 deletions(-)\n 
create mode 100644 gcc/config/aarch64/aarch64-neon-builtins-base.cc\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins-base.def\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins-base.h\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins-functions.h\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins-shapes.cc\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins-shapes.h\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins.cc\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins.def\n create mode 100644 gcc/config/aarch64/aarch64-neon-builtins.h\n create mode 100644 gcc/testsuite/gcc.target/aarch64/neon/aarch64-neon.exp\n create mode 100644 gcc/testsuite/gcc.target/aarch64/neon/arm_neon_test.h\n create mode 100644 gcc/testsuite/gcc.target/aarch64/neon/vadd.c", "diff": "diff --git a/gcc/config.gcc b/gcc/config.gcc\nindex 580a7fdee6b5..c125096a65cd 100644\n--- a/gcc/config.gcc\n+++ b/gcc/config.gcc\n@@ -192,7 +192,7 @@\n #\t\t\tthe --with-sysroot configure option or the\n #\t\t\t--sysroot command line option is used this\n #\t\t\twill be relative to the sysroot.\n-# target_type_format_char \n+# target_type_format_char\n # \t\t\tThe default character to be used for formatting\n #\t\t\tthe attribute in a\n #\t\t\t.type symbol_name, ${t_t_f_c}<property>\n@@ -361,7 +361,18 @@ cpu_is_64bit=\n case ${target} in\n aarch64*-*-*)\n \tcpu_type=aarch64\n-\textra_headers=\"arm_fp16.h arm_neon.h arm_bf16.h arm_acle.h arm_sve.h arm_sme.h arm_neon_sve_bridge.h arm_private_fp8.h arm_private_neon_types.h\"\n+\textra_headers=(\n+\t\t'arm_fp16.h'\n+\t\t'arm_neon.h'\n+\t\t'arm_bf16.h'\n+\t\t'arm_acle.h'\n+\t\t'arm_sve.h'\n+\t\t'arm_sme.h'\n+\t\t'arm_neon_sve_bridge.h'\n+\t\t'arm_private_fp8.h'\n+\t\t'arm_private_neon_types.h'\n+\t)\n+\textra_headers=\"${extra_headers[@]}\"\n \tc_target_objs=\"aarch64-c.o\"\n \tcxx_target_objs=\"aarch64-c.o\"\n \td_target_objs=\"aarch64-d.o\"\n@@ -382,6 +393,9 @@ aarch64*-*-*)\n \t\t'aarch64-json-tunings-printer.o'\n \t\t'aarch64-json-tunings-parser.o'\n \t\t'aarch64-narrow-gp-writes.o'\n+\t\t'aarch64-neon-builtins.o'\n+\t\t'aarch64-neon-builtins-base.o'\n+\t\t'aarch64-neon-builtins-shapes.o'\n \t)\n \textra_objs=\"${extra_objs[@]}\"\n \ttarget_gtfiles=(\n@@ -390,6 +404,8 @@ aarch64*-*-*)\n \t\t'$(srcdir)/config/aarch64/aarch64-builtins.cc'\n \t\t'$(srcdir)/config/aarch64/aarch64-acle-builtins.h'\n \t\t'$(srcdir)/config/aarch64/aarch64-sve-builtins.cc'\n+\t\t'$(srcdir)/config/aarch64/aarch64-neon-builtins.cc'\n+\t\t'$(srcdir)/config/aarch64/aarch64-neon-builtins.h'\n \t)\n \ttarget_gtfiles=\"${target_gtfiles[@]}\"\n \ttarget_has_targetm_common=yes\ndiff --git a/gcc/config/aarch64/aarch64-acle-builtins.h b/gcc/config/aarch64/aarch64-acle-builtins.h\nindex 20152aaea6a2..f0511456313e 100644\n--- a/gcc/config/aarch64/aarch64-acle-builtins.h\n+++ b/gcc/config/aarch64/aarch64-acle-builtins.h\n@@ -129,6 +129,7 @@ enum units_index\n /* Enumerates the pragma handlers. 
*/\n enum handle_pragma_index\n {\n+ arm_neon_handle,\n arm_sve_handle,\n arm_sme_handle,\n arm_neon_sve_handle,\n@@ -187,6 +188,7 @@ enum type_class_index\n TYPE_mfloat,\n TYPE_signed,\n TYPE_unsigned,\n+ TYPE_poly,\n NUM_TYPE_CLASSES\n };\n \n@@ -1172,6 +1174,830 @@ function_expander::result_mode () const\n return TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl)));\n }\n \n+void build_all (function_builder &b, const char *signature,\n+\t\tconst function_group_info &group,\n+\t\tmode_suffix_index mode_suffix_id,\n+\t\tbool force_direct_overloads = false);\n+\n+/* Define a TYPES_<combination> macro for each combination of type\n+ suffixes that an ACLE function can have, where <combination> is the\n+ name used in DEF_SVE_FUNCTION entries.\n+\n+ Use S (T) for single type suffix T and D (T1, T2) for a pair of type\n+ suffixes T1 and T2. Use commas to separate the suffixes.\n+\n+ Although the order shouldn't matter, the convention is to sort the\n+ suffixes lexicographically after dividing suffixes into a type\n+ class (\"b\", \"f\", etc.) and a numerical bit count. */\n+\n+/* _b8 _b16 _b32 _b64. */\n+#define TYPES_all_pred(S, D, T) \\\n+ S (b8), S (b16), S (b32), S (b64)\n+\n+/* _c8 _c16 _c32 _c64. */\n+#define TYPES_all_count(S, D, T) \\\n+ S (c8), S (c16), S (c32), S (c64)\n+\n+/* _b8 _b16 _b32 _b64\n+ _c8 _c16 _c32 _c64. */\n+#define TYPES_all_pred_count(S, D, T) \\\n+ TYPES_all_pred (S, D, T), \\\n+ TYPES_all_count (S, D, T)\n+\n+/* _f16 _f32 _f64. */\n+#define TYPES_all_float(S, D, T) \\\n+ S (f16), S (f32), S (f64)\n+\n+/* _s8 _s16 _s32 _s64. */\n+#define TYPES_all_signed(S, D, T) \\\n+ S (s8), S (s16), S (s32), S (s64)\n+\n+/* _f16 _f32 _f64\n+ _s8 _s16 _s32 _s64. */\n+#define TYPES_all_float_and_signed(S, D, T) \\\n+ TYPES_all_float (S, D, T), TYPES_all_signed (S, D, T)\n+\n+/* _u8 _u16 _u32 _u64. */\n+#define TYPES_all_unsigned(S, D, T) \\\n+ S (u8), S (u16), S (u32), S (u64)\n+\n+/* _s8 _s16 _s32 _s64\n+ _u8 _u16 _u32 _u64. */\n+#define TYPES_all_integer(S, D, T) \\\n+ TYPES_all_signed (S, D, T), TYPES_all_unsigned (S, D, T)\n+\n+/* _f16 _f32 _f64\n+ _s8 _s16 _s32 _s64\n+ _u8 _u16 _u32 _u64. */\n+#define TYPES_all_arith(S, D, T) \\\n+ TYPES_all_float (S, D, T), TYPES_all_integer (S, D, T)\n+\n+/*\t _f32 _f64\n+ _s8 _s16 _s32 _s64\n+ _u8 _u16 _u32 _u64. */\n+#define TYPES_all_arith_no_fp16(S, D, T) \\\n+ S (f32), S (f64), \\\n+ TYPES_all_integer (S, D, T)\n+\n+#define TYPES_all_data(S, D, T) \\\n+ TYPES_b_data (S, D, T), \\\n+ TYPES_h_data (S, D, T), \\\n+ TYPES_s_data (S, D, T), \\\n+ TYPES_d_data (S, D, T)\n+\n+/* _b only. */\n+#define TYPES_b(S, D, T) \\\n+ S (b)\n+\n+/* _c only. */\n+#define TYPES_c(S, D, T) \\\n+ S (c)\n+\n+/* _u8. */\n+#define TYPES_b_unsigned(S, D, T) \\\n+ S (u8)\n+\n+/* _s8\n+ _u8. */\n+#define TYPES_b_integer(S, D, T) \\\n+ S (s8), TYPES_b_unsigned (S, D, T)\n+\n+/* _mf8\n+ _s8\n+ _u8. */\n+#define TYPES_b_data(S, D, T) \\\n+ S (mf8), TYPES_b_integer (S, D, T)\n+\n+/* _s8 _s16\n+ _u8 _u16. */\n+#define TYPES_bh_integer(S, D, T) \\\n+ S (s8), S (s16), S (u8), S (u16)\n+\n+/* _u8 _u32. */\n+#define TYPES_bs_unsigned(S, D, T) \\\n+ S (u8), S (u32)\n+\n+/* _s8 _s16 _s32. */\n+#define TYPES_bhs_signed(S, D, T) \\\n+ S (s8), S (s16), S (s32)\n+\n+/* _u8 _u16 _u32. */\n+#define TYPES_bhs_unsigned(S, D, T) \\\n+ S (u8), S (u16), S (u32)\n+\n+/* _s8 _s16 _s32\n+ _u8 _u16 _u32. 
*/\n+#define TYPES_bhs_integer(S, D, T) \\\n+ TYPES_bhs_signed (S, D, T), TYPES_bhs_unsigned (S, D, T)\n+\n+#define TYPES_bh_data(S, D, T)\t\t\t\\\n+ TYPES_b_data (S, D, T), \\\n+ TYPES_h_data (S, D, T)\n+\n+#define TYPES_bhs_data(S, D, T)\t\t\t\\\n+ TYPES_b_data (S, D, T), \\\n+ TYPES_h_data (S, D, T), \\\n+ TYPES_s_data (S, D, T)\n+\n+/* _s16_s8 _s32_s16 _s64_s32\n+ _u16_u8 _u32_u16 _u64_u32. */\n+#define TYPES_bhs_widen(S, D, T) \\\n+ D (s16, s8), D (s32, s16), D (s64, s32), \\\n+ D (u16, u8), D (u32, u16), D (u64, u32)\n+\n+/* _bf16. */\n+#define TYPES_h_bfloat(S, D, T) \\\n+ S (bf16)\n+\n+/* _f16. */\n+#define TYPES_h_float(S, D, T) \\\n+ S (f16)\n+\n+/* _s16\n+ _u16. */\n+#define TYPES_h_integer(S, D, T) \\\n+ S (s16), S (u16)\n+\n+/* _bf16\n+ _f16\n+ _s16\n+ _u16. */\n+#define TYPES_h_data(S, D, T) \\\n+ S (bf16), S (f16), TYPES_h_integer (S, D, T)\n+\n+/* _s16 _s32. */\n+#define TYPES_hs_signed(S, D, T) \\\n+ S (s16), S (s32)\n+\n+/* _s16 _s32\n+ _u16 _u32. */\n+#define TYPES_hs_integer(S, D, T) \\\n+ TYPES_hs_signed (S, D, T), S (u16), S (u32)\n+\n+/* _f16 _f32. */\n+#define TYPES_hs_float(S, D, T) \\\n+ S (f16), S (f32)\n+\n+#define TYPES_hs_data(S, D, T) \\\n+ TYPES_h_data (S, D, T), \\\n+ TYPES_s_data (S, D, T)\n+\n+/* _u16 _u64. */\n+#define TYPES_hd_unsigned(S, D, T) \\\n+ S (u16), S (u64)\n+\n+/* _s16 _s32 _s64. */\n+#define TYPES_hsd_signed(S, D, T) \\\n+ S (s16), S (s32), S (s64)\n+\n+/* _s16 _s32 _s64\n+ _u16 _u32 _u64. */\n+#define TYPES_hsd_integer(S, D, T) \\\n+ TYPES_hsd_signed (S, D, T), S (u16), S (u32), S (u64)\n+\n+#define TYPES_hsd_data(S, D, T) \\\n+ TYPES_h_data (S, D, T), \\\n+ TYPES_s_data (S, D, T), \\\n+ TYPES_d_data (S, D, T)\n+\n+/* _f16_mf8. */\n+#define TYPES_h_float_mf8(S, D, T) \\\n+ D (f16, mf8)\n+\n+/* _f32. */\n+#define TYPES_s_float(S, D, T) \\\n+ S (f32)\n+\n+/* _f32_mf8. */\n+#define TYPES_s_float_mf8(S, D, T) \\\n+ D (f32, mf8)\n+\n+/* _f32\n+ _s16 _s32 _s64\n+ _u16 _u32 _u64. */\n+#define TYPES_s_float_hsd_integer(S, D, T) \\\n+ TYPES_s_float (S, D, T), TYPES_hsd_integer (S, D, T)\n+\n+/* _f32\n+ _s32 _s64\n+ _u32 _u64. */\n+#define TYPES_s_float_sd_integer(S, D, T) \\\n+ TYPES_s_float (S, D, T), TYPES_sd_integer (S, D, T)\n+\n+/* _s32. */\n+#define TYPES_s_signed(S, D, T) \\\n+ S (s32)\n+\n+/* _u32. */\n+#define TYPES_s_unsigned(S, D, T) \\\n+ S (u32)\n+\n+/* _s32\n+ _u32. */\n+#define TYPES_s_integer(S, D, T) \\\n+ TYPES_s_signed (S, D, T), TYPES_s_unsigned (S, D, T)\n+\n+/* _f32\n+ _s32\n+ _u32. */\n+#define TYPES_s_data(S, D, T) \\\n+ TYPES_s_float (S, D, T), TYPES_s_integer (S, D, T)\n+\n+/* _s32 _s64. */\n+#define TYPES_sd_signed(S, D, T) \\\n+ S (s32), S (s64)\n+\n+/* _u32 _u64. */\n+#define TYPES_sd_unsigned(S, D, T) \\\n+ S (u32), S (u64)\n+\n+/* _s32 _s64\n+ _u32 _u64. */\n+#define TYPES_sd_integer(S, D, T) \\\n+ TYPES_sd_signed (S, D, T), TYPES_sd_unsigned (S, D, T)\n+\n+#define TYPES_sd_data(S, D, T) \\\n+ TYPES_s_data (S, D, T), \\\n+ TYPES_d_data (S, D, T)\n+\n+/* _f16 _f32 _f64\n+\t_s32 _s64\n+\t_u32 _u64. */\n+#define TYPES_all_float_and_sd_integer(S, D, T) \\\n+ TYPES_all_float (S, D, T), TYPES_sd_integer (S, D, T)\n+\n+/* _f64. */\n+#define TYPES_d_float(S, D, T) \\\n+ S (f64)\n+\n+/* _u64. */\n+#define TYPES_d_unsigned(S, D, T) \\\n+ S (u64)\n+\n+/* _s64\n+ _u64. */\n+#define TYPES_d_integer(S, D, T) \\\n+ S (s64), TYPES_d_unsigned (S, D, T)\n+\n+/* _f64\n+ _s64\n+ _u64. */\n+#define TYPES_d_data(S, D, T) \\\n+ TYPES_d_float (S, D, T), TYPES_d_integer (S, D, T)\n+\n+/* All the type combinations allowed by svcvt. 
*/\n+#define TYPES_cvt(S, D, T) \\\n+ D (f16, f32), D (f16, f64), \\\n+ D (f16, s16), D (f16, s32), D (f16, s64), \\\n+ D (f16, u16), D (f16, u32), D (f16, u64), \\\n+ \\\n+ D (f32, f16), D (f32, f64), \\\n+ D (f32, s32), D (f32, s64), \\\n+ D (f32, u32), D (f32, u64), \\\n+ \\\n+ D (f64, f16), D (f64, f32), \\\n+ D (f64, s32), D (f64, s64), \\\n+ D (f64, u32), D (f64, u64), \\\n+ \\\n+ D (s16, f16), \\\n+ D (s32, f16), D (s32, f32), D (s32, f64), \\\n+ D (s64, f16), D (s64, f32), D (s64, f64), \\\n+ \\\n+ D (u16, f16), \\\n+ D (u32, f16), D (u32, f32), D (u32, f64), \\\n+ D (u64, f16), D (u64, f32), D (u64, f64)\n+\n+/* _bf16_f32. */\n+#define TYPES_cvt_bfloat(S, D, T) \\\n+ D (bf16, f32)\n+\n+/* { _bf16 _f16 } x _f32. */\n+#define TYPES_cvt_h_s_float(S, D, T) \\\n+ D (bf16, f32), D (f16, f32)\n+\n+/* _f32_f16. */\n+#define TYPES_cvt_f32_f16(S, D, T) \\\n+ D (f32, f16)\n+\n+/* _f32_f16\n+ _f64_f32. */\n+#define TYPES_cvt_long(S, D, T) \\\n+ D (f32, f16), D (f64, f32)\n+\n+/* _f32_f64. */\n+#define TYPES_cvt_narrow_s(S, D, T) \\\n+ D (f32, f64)\n+\n+/* _f16_f32\n+ _f32_f64. */\n+#define TYPES_cvt_narrow(S, D, T) \\\n+ D (f16, f32), TYPES_cvt_narrow_s (S, D, T)\n+\n+/* { _s32 _u32 } x _f32\n+\n+ _f32 x { _s32 _u32 }. */\n+#define TYPES_cvt_s_s(S, D, T) \\\n+ D (s32, f32), \\\n+ D (u32, f32), \\\n+ D (f32, s32), \\\n+ D (f32, u32)\n+\n+/* _f16_mf8\n+ _bf16_mf8. */\n+#define TYPES_cvt_mf8(S, D, T) \\\n+ D (f16, mf8), D (bf16, mf8)\n+\n+/* _mf8_f16\n+ _mf8_bf16. */\n+#define TYPES_cvtn_mf8(S, D, T) \\\n+ D (mf8, f16), D (mf8, bf16)\n+\n+/* _mf8_f32. */\n+#define TYPES_cvtnx_mf8(S, D, T) \\\n+ D (mf8, f32)\n+\n+/* { _s32 _s64 } x { _b8 _b16 _b32 _b64 }\n+ { _u32 _u64 }. */\n+#define TYPES_inc_dec_n1(D, A) \\\n+ D (A, b8), D (A, b16), D (A, b32), D (A, b64)\n+#define TYPES_inc_dec_n(S, D, T) \\\n+ TYPES_inc_dec_n1 (D, s32), \\\n+ TYPES_inc_dec_n1 (D, s64), \\\n+ TYPES_inc_dec_n1 (D, u32), \\\n+ TYPES_inc_dec_n1 (D, u64)\n+\n+/* { _s16 _u16 } x _s32\n+\n+ { _u16 } x _u32. */\n+#define TYPES_qcvt_x2(S, D, T) \\\n+ D (s16, s32), \\\n+ D (u16, u32), \\\n+ D (u16, s32)\n+\n+/* { _s8 _u8 } x _s32\n+\n+ { _u8 } x _u32\n+\n+ { _s16 _u16 } x _s64\n+\n+ { _u16 } x _u64. */\n+#define TYPES_qcvt_x4(S, D, T) \\\n+ D (s8, s32), \\\n+ D (u8, u32), \\\n+ D (u8, s32), \\\n+ D (s16, s64), \\\n+ D (u16, u64), \\\n+ D (u16, s64)\n+\n+/* _s16_s32\n+ _u16_u32. */\n+#define TYPES_qrshr_x2(S, D, T) \\\n+ D (s16, s32), \\\n+ D (u16, u32)\n+\n+/* _u16_s32. */\n+#define TYPES_qrshru_x2(S, D, T) \\\n+ D (u16, s32)\n+\n+/* _s8_s32\n+ _s16_s64\n+ _u8_u32\n+ _u16_u64. */\n+#define TYPES_qrshr_x4(S, D, T) \\\n+ D (s8, s32), \\\n+ D (s16, s64), \\\n+ D (u8, u32), \\\n+ D (u16, u64)\n+\n+/* _u8_s32\n+ _u16_s64. */\n+#define TYPES_qrshru_x4(S, D, T) \\\n+ D (u8, s32), \\\n+ D (u16, s64)\n+\n+/* { _mf8 _bf16\t\t } { _mf8 _bf16\t }\n+ { _f16 _f32 _f64 } { _f16 _f32 _f64 }\n+ { _s8 _s16 _s32 _s64 } x { _s8 _s16 _s32 _s64 }\n+ { _u8 _u16 _u32 _u64 } { _u8 _u16 _u32 _u64 }. 
*/\n+#define TYPES_reinterpret1(D, A) \\\n+ D (A, mf8), \\\n+ D (A, bf16), \\\n+ D (A, f16), D (A, f32), D (A, f64), \\\n+ D (A, s8), D (A, s16), D (A, s32), D (A, s64), \\\n+ D (A, u8), D (A, u16), D (A, u32), D (A, u64)\n+#define TYPES_reinterpret(S, D, T) \\\n+ TYPES_reinterpret1 (D, mf8), \\\n+ TYPES_reinterpret1 (D, bf16), \\\n+ TYPES_reinterpret1 (D, f16), \\\n+ TYPES_reinterpret1 (D, f32), \\\n+ TYPES_reinterpret1 (D, f64), \\\n+ TYPES_reinterpret1 (D, s8), \\\n+ TYPES_reinterpret1 (D, s16), \\\n+ TYPES_reinterpret1 (D, s32), \\\n+ TYPES_reinterpret1 (D, s64), \\\n+ TYPES_reinterpret1 (D, u8), \\\n+ TYPES_reinterpret1 (D, u16), \\\n+ TYPES_reinterpret1 (D, u32), \\\n+ TYPES_reinterpret1 (D, u64)\n+\n+/* _b_c\n+ _c_b. */\n+#define TYPES_reinterpret_b(S, D, T) \\\n+ D (b, c), \\\n+ D (c, b)\n+\n+/* { _b8 _b16 _b32 _b64 } x { _s32 _s64 }\n+\t\t\t { _u32 _u64 } */\n+#define TYPES_while1(D, bn) \\\n+ D (bn, s32), D (bn, s64), D (bn, u32), D (bn, u64)\n+#define TYPES_while(S, D, T) \\\n+ TYPES_while1 (D, b8), \\\n+ TYPES_while1 (D, b16), \\\n+ TYPES_while1 (D, b32), \\\n+ TYPES_while1 (D, b64)\n+\n+/* { _b8 _b16 _b32 _b64 } x { _s64 }\n+\t\t\t { _u64 } */\n+#define TYPES_while_x(S, D, T) \\\n+ D (b8, s64), D (b8, u64), \\\n+ D (b16, s64), D (b16, u64), \\\n+ D (b32, s64), D (b32, u64), \\\n+ D (b64, s64), D (b64, u64)\n+\n+/* { _c8 _c16 _c32 _c64 } x { _s64 }\n+\t\t\t { _u64 } */\n+#define TYPES_while_x_c(S, D, T) \\\n+ D (c8, s64), D (c8, u64), \\\n+ D (c16, s64), D (c16, u64), \\\n+ D (c32, s64), D (c32, u64), \\\n+ D (c64, s64), D (c64, u64)\n+\n+/* _f32_f16\n+ _s32_s16\n+ _u32_u16. */\n+#define TYPES_s_narrow_fsu(S, D, T) \\\n+ D (f32, f16), D (s32, s16), D (u32, u16)\n+\n+/* _za8 _za16 _za32 _za64 _za128. */\n+#define TYPES_all_za(S, D, T) \\\n+ S (za8), S (za16), S (za32), S (za64), S (za128)\n+\n+/* _za64. */\n+#define TYPES_d_za(S, D, T) \\\n+ S (za64)\n+\n+/* { _za8 } x { _mf8 _s8 _u8 }\n+ { _za16 } x { _bf16 _f16 _s16 _u16 }\n+ { _za32 } x { _f32 _s32 _u32 }\n+ { _za64 } x { _f64 _s64 _u64 }. */\n+#define TYPES_za_bhsd_data(S, D, T) \\\n+ D (za8, mf8), D (za8, s8), D (za8, u8), \\\n+ D (za16, bf16), D (za16, f16), D (za16, s16), D (za16, u16), \\\n+ D (za32, f32), D (za32, s32), D (za32, u32), \\\n+ D (za64, f64), D (za64, s64), D (za64, u64)\n+\n+/* Likewise, plus:\n+\n+ { _za128 } x { _bf16\t }\n+\t\t{ _f16 _f32 _f64 }\n+\t\t{ _s8 _s16 _s32 _s64 }\n+\t\t{ _u8 _u16 _u32 _u64 }. */\n+\n+#define TYPES_za_all_data(S, D, T) \\\n+ TYPES_za_bhsd_data (S, D, T), \\\n+ TYPES_reinterpret1 (D, za128)\n+\n+/* _za16_mf8. */\n+#define TYPES_za_h_mf8(S, D, T) \\\n+ D (za16, mf8)\n+\n+/* { _za_16 _za_32 } x _mf8. */\n+#define TYPES_za_hs_mf8(S, D, T) \\\n+ D (za16, mf8), D (za32, mf8)\n+\n+/* _za16_bf16. */\n+#define TYPES_za_h_bfloat(S, D, T) \\\n+ D (za16, bf16)\n+\n+/* _za16_f16. */\n+#define TYPES_za_h_float(S, D, T) \\\n+ D (za16, f16)\n+\n+/* _za32_s8. */\n+#define TYPES_za_s_b_signed(S, D, T) \\\n+ D (za32, s8)\n+\n+/* _za32_u8. */\n+#define TYPES_za_s_b_unsigned(S, D, T) \\\n+ D (za32, u8)\n+\n+/* _za32 x { _s8 _u8 }. */\n+#define TYPES_za_s_b_integer(S, D, T) \\\n+ D (za32, s8), D (za32, u8)\n+\n+/* _za32 x { _s16 _u16 }. */\n+#define TYPES_za_s_h_integer(S, D, T) \\\n+ D (za32, s16), D (za32, u16)\n+\n+/* _za32 x { _bf16 _f16 _s16 _u16 }. */\n+#define TYPES_za_s_h_data(S, D, T) \\\n+ D (za32, bf16), D (za32, f16), D (za32, s16), D (za32, u16)\n+\n+/* _za32_u32. */\n+#define TYPES_za_s_unsigned(S, D, T) \\\n+ D (za32, u32)\n+\n+/* _za32 x { _s32 _u32 }. 
*/\n+#define TYPES_za_s_integer(S, D, T) \\\n+ D (za32, s32), D (za32, u32)\n+\n+/* _za32_mf8. */\n+#define TYPES_za_s_mf8(S, D, T) \\\n+ D (za32, mf8)\n+\n+/* _za32_f32. */\n+#define TYPES_za_s_float(S, D, T) \\\n+ D (za32, f32)\n+\n+/* _za32 x { _f32 _s32 _u32 }. */\n+#define TYPES_za_s_data(S, D, T) \\\n+ D (za32, f32), D (za32, s32), D (za32, u32)\n+\n+/* _za64 x { _s16 _u16 }. */\n+#define TYPES_za_d_h_integer(S, D, T) \\\n+ D (za64, s16), D (za64, u16)\n+\n+/* _za64_f64. */\n+#define TYPES_za_d_float(S, D, T) \\\n+ D (za64, f64)\n+\n+/* _za64 x { _s64 _u64 }. */\n+#define TYPES_za_d_integer(S, D, T) \\\n+ D (za64, s64), D (za64, u64)\n+\n+/* _za32 x { _s8 _u8 _bf16 _f16 _f32 }. */\n+#define TYPES_mop_base(S, D, T) \\\n+ D (za32, s8), D (za32, u8), D (za32, bf16), D (za32, f16), D (za32, f32)\n+\n+/* _za32_s8. */\n+#define TYPES_mop_base_signed(S, D, T) \\\n+ D (za32, s8)\n+\n+/* _za32_u8. */\n+#define TYPES_mop_base_unsigned(S, D, T) \\\n+ D (za32, u8)\n+\n+/* _za64 x { _s16 _u16 }. */\n+#define TYPES_mop_i16i64(S, D, T) \\\n+ D (za64, s16), D (za64, u16)\n+\n+/* _za64_s16. */\n+#define TYPES_mop_i16i64_signed(S, D, T) \\\n+ D (za64, s16)\n+\n+/* _za64_u16. */\n+#define TYPES_mop_i16i64_unsigned(S, D, T) \\\n+ D (za64, u16)\n+\n+/* _za. */\n+#define TYPES_za(S, D, T) \\\n+ S (za)\n+\n+/* _p8 _p16 _p64. */\n+#define TYPES_bhd_poly(S, D, T) \\\n+ S (p8), S (p16), S (p64)\n+\n+/* _p8 _p16 _p64 _p128. */\n+#define TYPES_bhdq_poly(S, D, T) \\\n+ S (p8), S (p16), S (p64), S (p128)\n+\n+/* Describe a tuple of type suffixes in which only the first is used. */\n+#define DEF_VECTOR_TYPE(X) \\\n+ { TYPE_SUFFIX_ ## X, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES }\n+\n+/* Describe a tuple of type suffixes in which only the first two are used. */\n+#define DEF_DOUBLE_TYPE(X, Y) \\\n+ { TYPE_SUFFIX_ ## X, TYPE_SUFFIX_ ## Y, NUM_TYPE_SUFFIXES }\n+\n+/* Describe a tuple of type suffixes in which three elements are used. */\n+#define DEF_TRIPLE_TYPE(X, Y, Z) \\\n+ { TYPE_SUFFIX_ ## X, TYPE_SUFFIX_ ## Y, TYPE_SUFFIX_ ## Z }\n+\n+/* Create an array that can be used in aarch64-sve-builtins.def to\n+ select the type suffixes in TYPES_<NAME>. */\n+#define DEF_SVE_TYPES_ARRAY(NAME) \\\n+ static const type_suffix_triple types_##NAME[] = { \\\n+ TYPES_##NAME (DEF_VECTOR_TYPE, DEF_DOUBLE_TYPE, DEF_TRIPLE_TYPE), \\\n+ { NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES } \\\n+ }\n+\n+/* For functions that don't take any type suffixes. */\n+static const type_suffix_triple types_none[] = {\n+ { NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES },\n+ { NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES }\n+};\n+\n+/* Create an array for each TYPES_<combination> macro above. 
*/\n+DEF_SVE_TYPES_ARRAY (all_pred);\n+DEF_SVE_TYPES_ARRAY (all_count);\n+DEF_SVE_TYPES_ARRAY (all_pred_count);\n+DEF_SVE_TYPES_ARRAY (all_float);\n+DEF_SVE_TYPES_ARRAY (all_signed);\n+DEF_SVE_TYPES_ARRAY (all_float_and_signed);\n+DEF_SVE_TYPES_ARRAY (all_unsigned);\n+DEF_SVE_TYPES_ARRAY (all_integer);\n+DEF_SVE_TYPES_ARRAY (all_arith);\n+DEF_SVE_TYPES_ARRAY (all_arith_no_fp16);\n+DEF_SVE_TYPES_ARRAY (all_data);\n+DEF_SVE_TYPES_ARRAY (b);\n+DEF_SVE_TYPES_ARRAY (b_unsigned);\n+DEF_SVE_TYPES_ARRAY (b_integer);\n+DEF_SVE_TYPES_ARRAY (bh_integer);\n+DEF_SVE_TYPES_ARRAY (bs_unsigned);\n+DEF_SVE_TYPES_ARRAY (bhs_signed);\n+DEF_SVE_TYPES_ARRAY (bhs_unsigned);\n+DEF_SVE_TYPES_ARRAY (bhs_integer);\n+DEF_SVE_TYPES_ARRAY (bh_data);\n+DEF_SVE_TYPES_ARRAY (bhs_data);\n+DEF_SVE_TYPES_ARRAY (bhs_widen);\n+DEF_SVE_TYPES_ARRAY (c);\n+DEF_SVE_TYPES_ARRAY (h_bfloat);\n+DEF_SVE_TYPES_ARRAY (h_float);\n+DEF_SVE_TYPES_ARRAY (h_float_mf8);\n+DEF_SVE_TYPES_ARRAY (h_integer);\n+DEF_SVE_TYPES_ARRAY (h_data);\n+DEF_SVE_TYPES_ARRAY (hs_signed);\n+DEF_SVE_TYPES_ARRAY (hs_integer);\n+DEF_SVE_TYPES_ARRAY (hs_float);\n+DEF_SVE_TYPES_ARRAY (hs_data);\n+DEF_SVE_TYPES_ARRAY (hd_unsigned);\n+DEF_SVE_TYPES_ARRAY (hsd_signed);\n+DEF_SVE_TYPES_ARRAY (hsd_integer);\n+DEF_SVE_TYPES_ARRAY (hsd_data);\n+DEF_SVE_TYPES_ARRAY (s_float);\n+DEF_SVE_TYPES_ARRAY (s_float_hsd_integer);\n+DEF_SVE_TYPES_ARRAY (s_float_mf8);\n+DEF_SVE_TYPES_ARRAY (s_float_sd_integer);\n+DEF_SVE_TYPES_ARRAY (s_signed);\n+DEF_SVE_TYPES_ARRAY (s_unsigned);\n+DEF_SVE_TYPES_ARRAY (s_integer);\n+DEF_SVE_TYPES_ARRAY (s_data);\n+DEF_SVE_TYPES_ARRAY (sd_signed);\n+DEF_SVE_TYPES_ARRAY (sd_unsigned);\n+DEF_SVE_TYPES_ARRAY (sd_integer);\n+DEF_SVE_TYPES_ARRAY (sd_data);\n+DEF_SVE_TYPES_ARRAY (all_float_and_sd_integer);\n+DEF_SVE_TYPES_ARRAY (d_float);\n+DEF_SVE_TYPES_ARRAY (d_unsigned);\n+DEF_SVE_TYPES_ARRAY (d_integer);\n+DEF_SVE_TYPES_ARRAY (d_data);\n+DEF_SVE_TYPES_ARRAY (cvt);\n+DEF_SVE_TYPES_ARRAY (cvt_bfloat);\n+DEF_SVE_TYPES_ARRAY (cvt_h_s_float);\n+DEF_SVE_TYPES_ARRAY (cvt_f32_f16);\n+DEF_SVE_TYPES_ARRAY (cvt_long);\n+DEF_SVE_TYPES_ARRAY (cvt_mf8);\n+DEF_SVE_TYPES_ARRAY (cvt_narrow_s);\n+DEF_SVE_TYPES_ARRAY (cvt_narrow);\n+DEF_SVE_TYPES_ARRAY (cvt_s_s);\n+DEF_SVE_TYPES_ARRAY (cvtn_mf8);\n+DEF_SVE_TYPES_ARRAY (cvtnx_mf8);\n+DEF_SVE_TYPES_ARRAY (inc_dec_n);\n+DEF_SVE_TYPES_ARRAY (qcvt_x2);\n+DEF_SVE_TYPES_ARRAY (qcvt_x4);\n+DEF_SVE_TYPES_ARRAY (qrshr_x2);\n+DEF_SVE_TYPES_ARRAY (qrshr_x4);\n+DEF_SVE_TYPES_ARRAY (qrshru_x2);\n+DEF_SVE_TYPES_ARRAY (qrshru_x4);\n+DEF_SVE_TYPES_ARRAY (reinterpret);\n+DEF_SVE_TYPES_ARRAY (reinterpret_b);\n+DEF_SVE_TYPES_ARRAY (while);\n+DEF_SVE_TYPES_ARRAY (while_x);\n+DEF_SVE_TYPES_ARRAY (while_x_c);\n+DEF_SVE_TYPES_ARRAY (s_narrow_fsu);\n+DEF_SVE_TYPES_ARRAY (all_za);\n+DEF_SVE_TYPES_ARRAY (d_za);\n+DEF_SVE_TYPES_ARRAY (za_bhsd_data);\n+DEF_SVE_TYPES_ARRAY (za_all_data);\n+DEF_SVE_TYPES_ARRAY (za_h_mf8);\n+DEF_SVE_TYPES_ARRAY (za_h_bfloat);\n+DEF_SVE_TYPES_ARRAY (za_h_float);\n+DEF_SVE_TYPES_ARRAY (za_s_b_signed);\n+DEF_SVE_TYPES_ARRAY (za_s_b_unsigned);\n+DEF_SVE_TYPES_ARRAY (za_s_b_integer);\n+DEF_SVE_TYPES_ARRAY (za_s_h_integer);\n+DEF_SVE_TYPES_ARRAY (za_s_h_data);\n+DEF_SVE_TYPES_ARRAY (za_s_unsigned);\n+DEF_SVE_TYPES_ARRAY (za_s_integer);\n+DEF_SVE_TYPES_ARRAY (za_s_mf8);\n+DEF_SVE_TYPES_ARRAY (za_hs_mf8);\n+DEF_SVE_TYPES_ARRAY (za_s_float);\n+DEF_SVE_TYPES_ARRAY (za_s_data);\n+DEF_SVE_TYPES_ARRAY (za_d_h_integer);\n+DEF_SVE_TYPES_ARRAY (za_d_float);\n+DEF_SVE_TYPES_ARRAY (za_d_integer);\n+DEF_SVE_TYPES_ARRAY 
(mop_base);\n+DEF_SVE_TYPES_ARRAY (mop_base_signed);\n+DEF_SVE_TYPES_ARRAY (mop_base_unsigned);\n+DEF_SVE_TYPES_ARRAY (mop_i16i64);\n+DEF_SVE_TYPES_ARRAY (mop_i16i64_signed);\n+DEF_SVE_TYPES_ARRAY (mop_i16i64_unsigned);\n+DEF_SVE_TYPES_ARRAY (za);\n+\n+DEF_SVE_TYPES_ARRAY (bhd_poly);\n+DEF_SVE_TYPES_ARRAY (bhdq_poly);\n+\n+static const group_suffix_index groups_none[] = {\n+ GROUP_none, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_x2[] = { GROUP_x2, NUM_GROUP_SUFFIXES };\n+\n+static const group_suffix_index groups_x12[] = {\n+ GROUP_none, GROUP_x2, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_x4[] = { GROUP_x4, NUM_GROUP_SUFFIXES };\n+\n+static const group_suffix_index groups_x24[] = {\n+ GROUP_x2, GROUP_x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_x124[] = {\n+ GROUP_none, GROUP_x2, GROUP_x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_x1234[] = {\n+ GROUP_none, GROUP_x2, GROUP_x3, GROUP_x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_vg1x2[] = {\n+ GROUP_vg1x2, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_vg1x4[] = {\n+ GROUP_vg1x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_vg1x24[] = {\n+ GROUP_vg1x2, GROUP_vg1x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_vg2[] = {\n+ GROUP_vg2x1, GROUP_vg2x2, GROUP_vg2x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_vg4[] = {\n+ GROUP_vg4x1, GROUP_vg4x2, GROUP_vg4x4, NUM_GROUP_SUFFIXES\n+};\n+\n+static const group_suffix_index groups_vg24[] = {\n+ GROUP_vg2, GROUP_vg4, NUM_GROUP_SUFFIXES\n+};\n+\n+/* Used by functions that have no governing predicate. */\n+static const predication_index preds_none[] = { PRED_none, NUM_PREDS };\n+\n+/* Used by functions that have a governing predicate but do not have an\n+ explicit suffix. */\n+static const predication_index preds_implicit[] = { PRED_implicit, NUM_PREDS };\n+\n+/* Used by functions that only support \"_m\" predication. */\n+static const predication_index preds_m[] = { PRED_m, NUM_PREDS };\n+\n+/* Used by functions that allow merging and \"don't care\" predication,\n+ but are not suitable for predicated MOVPRFX. */\n+static const predication_index preds_mx[] = {\n+ PRED_m, PRED_x, NUM_PREDS\n+};\n+\n+/* Used by functions that allow merging, zeroing and \"don't care\"\n+ predication. */\n+static const predication_index preds_mxz[] = {\n+ PRED_m, PRED_x, PRED_z, NUM_PREDS\n+};\n+\n+/* Used by functions that have the mxz predicated forms above, and in addition\n+ have an unpredicated form. */\n+static const predication_index preds_mxz_or_none[] = {\n+ PRED_m, PRED_x, PRED_z, PRED_none, NUM_PREDS\n+};\n+\n+/* Used by functions that allow merging and zeroing predication but have\n+ no \"_x\" form. */\n+static const predication_index preds_mz[] = { PRED_m, PRED_z, NUM_PREDS };\n+\n+/* Used by functions that have an unpredicated form and a _z predicated\n+ form. */\n+static const predication_index preds_z_or_none[] = {\n+ PRED_z, PRED_none, NUM_PREDS\n+};\n+\n+/* Used by (mostly predicate) functions that only support \"_z\" predication. */\n+static const predication_index preds_z[] = { PRED_z, NUM_PREDS };\n+\n+/* Used by SME instructions that always merge into ZA. 
*/\n+static const predication_index preds_za_m[] = { PRED_za_m, NUM_PREDS };\n }\n \n #endif\ndiff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc\nindex 611f6dc45e0a..e9e237f65aae 100644\n--- a/gcc/config/aarch64/aarch64-builtins.cc\n+++ b/gcc/config/aarch64/aarch64-builtins.cc\n@@ -52,6 +52,7 @@\n #include \"tree-pass.h\"\n #include \"tree-vector-builder.h\"\n #include \"aarch64-builtins.h\"\n+#include \"aarch64-neon-builtins.h\"\n \n using namespace aarch64;\n \n@@ -1938,12 +1939,11 @@ aarch64_target_switcher::~aarch64_target_switcher ()\n \t sizeof (have_regs_of_mode));\n }\n \n-/* Implement #pragma GCC aarch64 \"arm_neon.h\".\n-\n- The types and functions defined here need to be available internally\n- during LTO as well. */\n+/* Initialize NEON builtins using the old framework.\n+ Delete once NEON all intrinsics have been ported to the pragma-based\n+ framework. */\n void\n-handle_arm_neon_h (void)\n+init_arm_neon_builtins (void)\n {\n aarch64_target_switcher switcher (AARCH64_FL_SIMD);\n \n@@ -1971,7 +1971,7 @@ aarch64_init_simd_builtins (void)\n \n aarch64_init_simd_builtin_functions (false);\n if (in_lto_p)\n- handle_arm_neon_h ();\n+ init_arm_neon_builtins ();\n \n /* Initialize the remaining fcmla_laneq intrinsics. */\n aarch64_init_fcmla_laneq_builtins ();\ndiff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc\nindex ef2475154e85..85842152862b 100644\n--- a/gcc/config/aarch64/aarch64-c.cc\n+++ b/gcc/config/aarch64/aarch64-c.cc\n@@ -32,6 +32,7 @@\n #include \"c-family/c-pragma.h\"\n #include \"langhooks.h\"\n #include \"target.h\"\n+#include \"aarch64-neon-builtins.h\"\n \n \n #define builtin_define(TXT) cpp_define (pfile, TXT)\n@@ -409,7 +410,7 @@ aarch64_pragma_aarch64 (cpp_reader *)\n else if (strcmp (name, \"arm_sme.h\") == 0)\n aarch64_acle::handle_arm_sme_h (false);\n else if (strcmp (name, \"arm_neon.h\") == 0)\n- handle_arm_neon_h ();\n+ aarch64_acle::handle_arm_neon_h (false);\n else if (strcmp (name, \"arm_acle.h\") == 0)\n handle_arm_acle_h ();\n else if (strcmp (name, \"arm_neon_sve_bridge.h\") == 0)\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins-base.cc b/gcc/config/aarch64/aarch64-neon-builtins-base.cc\nnew file mode 100644\nindex 000000000000..4c3c33c56629\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins-base.cc\n@@ -0,0 +1,113 @@\n+/* ACLE support for AArch64 NEON (__ARM_FEATURE_SIMD intrinsics)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. 
*/\n+\n+#include \"config.h\"\n+#include \"system.h\"\n+#include \"coretypes.h\"\n+#include \"tm.h\"\n+#include \"tree.h\"\n+#include \"rtl.h\"\n+#include \"tm_p.h\"\n+#include \"memmodel.h\"\n+#include \"insn-codes.h\"\n+#include \"optabs.h\"\n+#include \"recog.h\"\n+#include \"expr.h\"\n+#include \"basic-block.h\"\n+#include \"function.h\"\n+#include \"fold-const.h\"\n+#include \"gimple.h\"\n+#include \"gimple-iterator.h\"\n+#include \"gimplify.h\"\n+#include \"explow.h\"\n+#include \"tree-vector-builder.h\"\n+#include \"rtx-vector-builder.h\"\n+#include \"vec-perm-indices.h\"\n+#include \"aarch64-acle-builtins.h\"\n+#include \"aarch64-neon-builtins-base.h\"\n+#include \"aarch64-neon-builtins-functions.h\"\n+#include \"aarch64-neon-builtins.h\"\n+#include \"gimple-fold.h\"\n+\n+using namespace aarch64_acle;\n+\n+/* Base class for all function expanders.\n+ At least one of `expand` or `fold` must be overriden by derived classes. */\n+class gimple_function_base : public function_base\n+{\n+ rtx expand (function_expander &) const override { gcc_unreachable (); }\n+ gimple *fold (gimple_folder &) const override { gcc_unreachable (); }\n+};\n+\n+/* For intrinsics that map to a single GIMPLE expression with no argument\n+ preparation necessary. */\n+class gimple_expr : public gimple_function_base\n+{\n+ tree_code m_int_code;\n+ tree_code m_float_code;\n+ tree_code m_poly_code;\n+\n+public:\n+ constexpr gimple_expr (tree_code code)\n+ : m_int_code (code),\n+ m_float_code (code),\n+ m_poly_code (code)\n+ {}\n+\n+ constexpr gimple_expr (tree_code int_code,\n+\t\t\t tree_code float_code,\n+\t\t\t tree_code poly_code)\n+ : m_int_code (int_code),\n+ m_float_code (float_code),\n+ m_poly_code (poly_code)\n+ {}\n+\n+ gimple *fold (gimple_folder &f) const override\n+ {\n+ auto nargs = gimple_call_num_args (f.call);\n+ auto arg0 = nargs >= 1 ? gimple_call_arg (f.call, 0) : nullptr;\n+ auto arg1 = nargs >= 2 ? gimple_call_arg (f.call, 1) : nullptr;\n+ auto arg2 = nargs >= 3 ? gimple_call_arg (f.call, 2) : nullptr;\n+\n+ tree_code code;\n+ auto type_class = f.type_suffix (0).tclass;\n+ switch (type_class)\n+ {\n+ case TYPE_signed:\n+ case TYPE_unsigned:\n+\tcode = m_int_code;\n+\tbreak;\n+ case TYPE_float:\n+\tcode = m_float_code;\n+\tbreak;\n+ case TYPE_poly:\n+\tcode = m_poly_code;\n+\tbreak;\n+ default:\n+\tgcc_unreachable ();\n+ }\n+\n+ return gimple_build_assign (f.lhs, code, arg0, arg1, arg2);\n+ }\n+};\n+\n+// Lanewise arithmetic\n+NEON_FUNCTION (vaddd, gimple_expr, (PLUS_EXPR))\n+NEON_FUNCTION (vadd, gimple_expr, (PLUS_EXPR, PLUS_EXPR, BIT_XOR_EXPR))\n+NEON_FUNCTION (vaddq, gimple_expr, (PLUS_EXPR, PLUS_EXPR, BIT_XOR_EXPR))\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins-base.def b/gcc/config/aarch64/aarch64-neon-builtins-base.def\nnew file mode 100644\nindex 000000000000..c8077d96a7dd\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins-base.def\n@@ -0,0 +1,33 @@\n+/* ACLE support for AArch64 NEON (__ARM_FEATURE_SIMD intrinsics)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+// Lanewise arithmetic\n+#define REQUIRED_EXTENSIONS nonstreaming_only (AARCH64_FL_SIMD)\n+DEF_NEON_FUNCTION (vaddd, d_integer,\t (\"s0,s0,s0\"))\n+DEF_NEON_FUNCTION (vadd, all_arith_no_fp16, (\"D0,D0,D0\"))\n+DEF_NEON_FUNCTION (vadd, bhd_poly,\t (\"D0,D0,D0\"))\n+DEF_NEON_FUNCTION (vaddq, all_arith_no_fp16, (\"Q0,Q0,Q0\"))\n+DEF_NEON_FUNCTION (vaddq, bhdq_poly,\t (\"Q0,Q0,Q0\"))\n+#undef REQUIRED_EXTENSIONS\n+\n+// Lanewise arithmetic (FP16)\n+#define REQUIRED_EXTENSIONS nonstreaming_only (AARCH64_FL_F16)\n+DEF_NEON_FUNCTION (vadd, h_float, (\"D0,D0,D0\"))\n+DEF_NEON_FUNCTION (vaddq, h_float, (\"Q0,Q0,Q0\"))\n+#undef REQUIRED_EXTENSIONS\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins-base.h b/gcc/config/aarch64/aarch64-neon-builtins-base.h\nnew file mode 100644\nindex 000000000000..9612bef42f26\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins-base.h\n@@ -0,0 +1,29 @@\n+/* ACLE support for AArch64 NEON (__ARM_FEATURE_SIMD intrinsics)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+#ifndef GCC_AARCH64_NEON_BUILTINS_BASE_H\n+#define GCC_AARCH64_NEON_BUILTINS_BASE_H\n+\n+namespace aarch64_acle::functions {\n+#define DEF_NEON_FUNCTION(NAME, ...) \\\n+ extern const aarch64_acle::function_base *const NAME;\n+#include \"aarch64-neon-builtins.def\"\n+}\n+\n+#endif\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins-functions.h b/gcc/config/aarch64/aarch64-neon-builtins-functions.h\nnew file mode 100644\nindex 000000000000..58a631ac54e0\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins-functions.h\n@@ -0,0 +1,29 @@\n+/* ACLE support for AArch64 NEON (function_base classes)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+#ifndef GCC_AARCH64_NEON_BUILTINS_FUNCTIONS_H\n+#define GCC_AARCH64_NEON_BUILTINS_FUNCTIONS_H\n+\n+/* Declare the global function base NAME, creating it from an instance\n+ of class CLASS with constructor arguments ARGS. 
*/\n+#define NEON_FUNCTION(NAME, CLASS, ARGS)\t\t\t\t\\\n+ namespace { static constexpr const CLASS NAME##_obj ARGS; }\t\t\\\n+ const function_base *const aarch64_acle::functions::NAME = &NAME##_obj;\n+\n+#endif\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins-shapes.cc b/gcc/config/aarch64/aarch64-neon-builtins-shapes.cc\nnew file mode 100644\nindex 000000000000..7946a7675eb5\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins-shapes.cc\n@@ -0,0 +1,69 @@\n+/* ACLE support for AArch64 NEON (function shapes)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+#define INCLUDE_ALGORITHM\n+#include \"config.h\"\n+#include \"system.h\"\n+#include \"coretypes.h\"\n+#include \"tm.h\"\n+#include \"basic-block.h\"\n+#include \"tree.h\"\n+#include \"function.h\"\n+#include \"gimple.h\"\n+#include \"rtl.h\"\n+#include \"tm_p.h\"\n+#include \"memmodel.h\"\n+#include \"insn-codes.h\"\n+#include \"optabs.h\"\n+#include \"aarch64-acle-builtins.h\"\n+#include \"aarch64-sve-builtins-shapes.h\"\n+#include \"aarch64-builtins.h\"\n+\n+using namespace aarch64_acle;\n+\n+/* All NEON functions are non-overloaded, so we don't need bespoke\n+ function shapes. Instead, we can just use a single shape for all NEON\n+ functions, parameterised by a signature. 
*/\n+struct neon_shape : public function_shape\n+{\n+ constexpr neon_shape (const char *signature)\n+ : m_signature (signature)\n+ {}\n+\n+ const char *m_signature;\n+\n+ void build (function_builder &b,\n+\t const function_group_info &group) const override\n+ {\n+ aarch64_acle::build_all (b, this->m_signature, group, MODE_none);\n+ }\n+\n+ bool check (function_checker &) const override { return true; }\n+\n+ bool explicit_type_suffix_p (unsigned int) const override { return true; }\n+ tree resolve (function_resolver &) const override { gcc_unreachable (); }\n+};\n+\n+namespace aarch64_acle::shapes {\n+#define DEF_NEON_FUNCTION(NAME, TYPES, SHAPE_ARGS)\t\t\t\\\n+ static constexpr const neon_shape OBJ_NAME (NAME, TYPES) SHAPE_ARGS;\t\\\n+ const aarch64_acle::function_shape *SHAPE_NAME (NAME, TYPES)\t\t\\\n+ = &OBJ_NAME (NAME, TYPES);\n+#include \"aarch64-neon-builtins.def\"\n+}\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins-shapes.h b/gcc/config/aarch64/aarch64-neon-builtins-shapes.h\nnew file mode 100644\nindex 000000000000..c94f4c994643\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins-shapes.h\n@@ -0,0 +1,29 @@\n+/* ACLE support for AArch64 NEON (function shapes)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+#ifndef GCC_AARCH64_NEON_BUILTINS_SHAPES_H\n+#define GCC_AARCH64_NEON_BUILTINS_SHAPES_H\n+\n+namespace aarch64_acle::shapes {\n+#define DEF_NEON_FUNCTION(NAME, TYPES, SHAPE_ARGS) \\\n+ extern const aarch64_acle::function_shape *const SHAPE_NAME (NAME, TYPES);\n+#include \"aarch64-neon-builtins.def\"\n+}\n+\n+#endif\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins.cc b/gcc/config/aarch64/aarch64-neon-builtins.cc\nnew file mode 100644\nindex 000000000000..7159b265ec9c\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins.cc\n@@ -0,0 +1,86 @@\n+/* ACLE support for AArch64 NEON\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. 
*/\n+\n+#define IN_TARGET_CODE 1\n+\n+#include \"config.h\"\n+#include \"system.h\"\n+#include \"coretypes.h\"\n+#include \"tm.h\"\n+#include \"tree.h\"\n+#include \"rtl.h\"\n+#include \"tm_p.h\"\n+#include \"memmodel.h\"\n+#include \"insn-codes.h\"\n+#include \"optabs.h\"\n+#include \"diagnostic.h\"\n+#include \"expr.h\"\n+#include \"basic-block.h\"\n+#include \"function.h\"\n+#include \"gimple.h\"\n+#include \"gimple-iterator.h\"\n+#include \"gimplify.h\"\n+#include \"explow.h\"\n+#include \"aarch64-acle-builtins.h\"\n+#include \"aarch64-sve-builtins-base.h\"\n+#include \"aarch64-sve-builtins-shapes.h\"\n+#include \"aarch64-neon-builtins-shapes.h\"\n+#include \"aarch64-neon-builtins-functions.h\"\n+#include \"aarch64-neon-builtins-base.h\"\n+#include \"aarch64-builtins.h\"\n+\n+/* Implement `#pragma GCC aarch64 \"arm_neon\"`. */\n+namespace aarch64_acle {\n+constexpr const aarch64_acle::function_group_info neon_function_groups[] = {\n+#define DEF_NEON_FUNCTION(NAME, TYPES, SHAPE_ARGS)\t\t\t\\\n+ {\t\t\t\t\t\t\t\t\t\\\n+ /* .base_name = */ #NAME,\t\t\t\t\t\t\\\n+ /* .base = */ &aarch64_acle::functions::NAME,\t\t\t\\\n+ /* .shape = */ &aarch64_acle::shapes::SHAPE_NAME (NAME, TYPES),\\\n+ /* .types = */ aarch64_acle::types_##TYPES,\t\t\t\\\n+ /* .groups = */ aarch64_acle::groups_none,\t\t\t\\\n+ /* .preds = */ aarch64_acle::preds_none,\t\t\t\\\n+ /* .extensions = */ aarch64_required_extensions::REQUIRED_EXTENSIONS,\\\n+ /* .fpm_mode = */ aarch64_acle::FPM_unused,\t\t\t\\\n+ },\n+#include \"aarch64-neon-builtins.def\"\n+};\n+\n+bool arm_neon_h_handled = false;\n+\n+void\n+handle_arm_neon_h (bool function_nulls_p)\n+{\n+ if (arm_neon_h_handled)\n+ return;\n+\n+ /* FIXME: Remove once all NEON intrinsics have been ported to the pragma-based\n+ framework. */\n+ init_arm_neon_builtins ();\n+\n+ aarch64_target_switcher switcher;\n+ aarch64_acle::function_builder builder (aarch64_acle::arm_neon_handle,\n+\t\t\t\t\t function_nulls_p);\n+\n+ for (auto &group : neon_function_groups)\n+ builder.register_function_group (group);\n+\n+ arm_neon_h_handled = true;\n+}\n+};\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins.def b/gcc/config/aarch64/aarch64-neon-builtins.def\nnew file mode 100644\nindex 000000000000..58630eebe489\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins.def\n@@ -0,0 +1,40 @@\n+/* ACLE support for AArch64 NEON (__ARM_FEATURE_SIMD intrinsics)\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+/* Code organization: See block comment at the top of\n+ aarch64-sve-builtins.def. */\n+\n+/* Define a new function group. */\n+#ifndef DEF_NEON_FUNCTION\n+#define DEF_NEON_FUNCTION(NAME, TYPES, SHAPE_ARGS)\n+#endif\n+\n+/* Helper for generating the name of the function_group's corresponding\n+ neon_shape instance. 
*/\n+#define OBJ_NAME(NAME, TYPES) NAME ## _ ## TYPES ## _obj\n+\n+/* Helper for generating the name of the function_group's corresponding\n+ function_shape. */\n+#define SHAPE_NAME(NAME, TYPES) NAME ## _ ## TYPES ## _shape\n+\n+#include \"aarch64-neon-builtins-base.def\"\n+\n+#undef DEF_NEON_FUNCTION\n+#undef OBJ_NAME\n+#undef SHAPE_NAME\ndiff --git a/gcc/config/aarch64/aarch64-neon-builtins.h b/gcc/config/aarch64/aarch64-neon-builtins.h\nnew file mode 100644\nindex 000000000000..c3bf53674cbc\n--- /dev/null\n+++ b/gcc/config/aarch64/aarch64-neon-builtins.h\n@@ -0,0 +1,28 @@\n+/* ACLE support for AArch64 NEON\n+ Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+\n+ This file is part of GCC.\n+\n+ GCC is free software; you can redistribute it and/or modify it\n+ under the terms of the GNU General Public License as published by\n+ the Free Software Foundation; either version 3, or (at your option)\n+ any later version.\n+\n+ GCC is distributed in the hope that it will be useful, but\n+ WITHOUT ANY WARRANTY; without even the implied warranty of\n+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+ General Public License for more details.\n+\n+ You should have received a copy of the GNU General Public License\n+ along with GCC; see the file COPYING3. If not see\n+ <http://www.gnu.org/licenses/>. */\n+\n+#ifndef GCC_AARCH64_NEON_BUILTINS_H\n+#define GCC_AARCH64_NEON_BUILTINS_H\n+\n+namespace aarch64_acle {\n+extern bool arm_neon_h_handled;\n+void handle_arm_neon_h (bool);\n+};\n+\n+#endif\ndiff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h\nindex b794cd7de664..1610f54986e6 100644\n--- a/gcc/config/aarch64/aarch64-protos.h\n+++ b/gcc/config/aarch64/aarch64-protos.h\n@@ -1155,7 +1155,7 @@ tree aarch64_general_builtin_decl (unsigned, bool);\n tree aarch64_general_builtin_rsqrt (unsigned int);\n void aarch64_ms_variadic_abi_init_builtins (void);\n void handle_arm_acle_h (void);\n-void handle_arm_neon_h (void);\n+void init_arm_neon_builtins (void);\n \n bool aarch64_check_required_extensions (location_t, tree,\n \t\t\t\t\taarch64_required_extensions);\ndiff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc\nindex 5fb65dd8a319..b51115359eaf 100644\n--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc\n+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc\n@@ -277,6 +277,18 @@ parse_type (const function_instance &instance, const char *&format)\n if (ch == 's')\n {\n type_suffix_index suffix = parse_element_type (instance, format);\n+\n+ // HACK: remove once all NEON intrinsics have been ported to the\n+ // pragma-based framework.\n+ if (suffix == TYPE_SUFFIX_p8)\n+\treturn aarch64_simd_types_trees[Poly8_t].eltype;\n+ if (suffix == TYPE_SUFFIX_p16)\n+\treturn aarch64_simd_types_trees[Poly16_t].eltype;\n+ if (suffix == TYPE_SUFFIX_p64)\n+\treturn aarch64_simd_types_trees[Poly64_t].eltype;\n+ if (suffix == TYPE_SUFFIX_p128)\n+\treturn aarch64_simd_types_trees[Poly128_t].eltype;\n+\n return scalar_types[type_suffixes[suffix].vector_type];\n }\n \n@@ -530,10 +542,10 @@ build_vs_offset (function_builder &b, const char *signature,\n predicate. FORCE_DIRECT_OVERLOADS is true if there is a one-to-one\n mapping between \"short\" and \"full\" names, and if standard overload\n resolution therefore isn't necessary. 
*/\n-static void\n+void\n build_all (function_builder &b, const char *signature,\n \t const function_group_info &group, mode_suffix_index mode_suffix_id,\n-\t bool force_direct_overloads = false)\n+\t bool force_direct_overloads)\n {\n for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)\n for (unsigned int gi = 0; group.groups[gi] != NUM_GROUP_SUFFIXES; ++gi)\ndiff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc\nindex da96da69f273..6f5244ae81d2 100644\n--- a/gcc/config/aarch64/aarch64-sve-builtins.cc\n+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc\n@@ -55,6 +55,7 @@\n #include \"aarch64-sve-builtins-sme.h\"\n #include \"aarch64-sve-builtins-shapes.h\"\n #include \"aarch64-builtins.h\"\n+#include \"aarch64-neon-builtins.h\"\n \n using namespace aarch64;\n \n@@ -180,814 +181,6 @@ CONSTEXPR const group_suffix_info group_suffixes[] = {\n { \"\", 0, 1 }\n };\n \n-/* Define a TYPES_<combination> macro for each combination of type\n- suffixes that an ACLE function can have, where <combination> is the\n- name used in DEF_SVE_FUNCTION entries.\n-\n- Use S (T) for single type suffix T and D (T1, T2) for a pair of type\n- suffixes T1 and T2. Use commas to separate the suffixes.\n-\n- Although the order shouldn't matter, the convention is to sort the\n- suffixes lexicographically after dividing suffixes into a type\n- class (\"b\", \"f\", etc.) and a numerical bit count. */\n-\n-/* _b8 _b16 _b32 _b64. */\n-#define TYPES_all_pred(S, D, T) \\\n- S (b8), S (b16), S (b32), S (b64)\n-\n-/* _c8 _c16 _c32 _c64. */\n-#define TYPES_all_count(S, D, T) \\\n- S (c8), S (c16), S (c32), S (c64)\n-\n-/* _b8 _b16 _b32 _b64\n- _c8 _c16 _c32 _c64. */\n-#define TYPES_all_pred_count(S, D, T) \\\n- TYPES_all_pred (S, D, T), \\\n- TYPES_all_count (S, D, T)\n-\n-/* _f16 _f32 _f64. */\n-#define TYPES_all_float(S, D, T) \\\n- S (f16), S (f32), S (f64)\n-\n-/* _s8 _s16 _s32 _s64. */\n-#define TYPES_all_signed(S, D, T) \\\n- S (s8), S (s16), S (s32), S (s64)\n-\n-/* _f16 _f32 _f64\n- _s8 _s16 _s32 _s64. */\n-#define TYPES_all_float_and_signed(S, D, T) \\\n- TYPES_all_float (S, D, T), TYPES_all_signed (S, D, T)\n-\n-/* _u8 _u16 _u32 _u64. */\n-#define TYPES_all_unsigned(S, D, T) \\\n- S (u8), S (u16), S (u32), S (u64)\n-\n-/* _s8 _s16 _s32 _s64\n- _u8 _u16 _u32 _u64. */\n-#define TYPES_all_integer(S, D, T) \\\n- TYPES_all_signed (S, D, T), TYPES_all_unsigned (S, D, T)\n-\n-/* _f16 _f32 _f64\n- _s8 _s16 _s32 _s64\n- _u8 _u16 _u32 _u64. */\n-#define TYPES_all_arith(S, D, T) \\\n- TYPES_all_float (S, D, T), TYPES_all_integer (S, D, T)\n-\n-#define TYPES_all_data(S, D, T) \\\n- TYPES_b_data (S, D, T), \\\n- TYPES_h_data (S, D, T), \\\n- TYPES_s_data (S, D, T), \\\n- TYPES_d_data (S, D, T)\n-\n-/* _b only. */\n-#define TYPES_b(S, D, T) \\\n- S (b)\n-\n-/* _c only. */\n-#define TYPES_c(S, D, T) \\\n- S (c)\n-\n-/* _u8. */\n-#define TYPES_b_unsigned(S, D, T) \\\n- S (u8)\n-\n-/* _s8\n- _u8. */\n-#define TYPES_b_integer(S, D, T) \\\n- S (s8), TYPES_b_unsigned (S, D, T)\n-\n-/* _mf8\n- _s8\n- _u8. */\n-#define TYPES_b_data(S, D, T) \\\n- S (mf8), TYPES_b_integer (S, D, T)\n-\n-/* _s8 _s16\n- _u8 _u16. */\n-#define TYPES_bh_integer(S, D, T) \\\n- S (s8), S (s16), S (u8), S (u16)\n-\n-/* _u8 _u32. */\n-#define TYPES_bs_unsigned(S, D, T) \\\n- S (u8), S (u32)\n-\n-/* _s8 _s16 _s32. */\n-#define TYPES_bhs_signed(S, D, T) \\\n- S (s8), S (s16), S (s32)\n-\n-/* _u8 _u16 _u32. 
*/\n-#define TYPES_bhs_unsigned(S, D, T) \\\n- S (u8), S (u16), S (u32)\n-\n-/* _s8 _s16 _s32\n- _u8 _u16 _u32. */\n-#define TYPES_bhs_integer(S, D, T) \\\n- TYPES_bhs_signed (S, D, T), TYPES_bhs_unsigned (S, D, T)\n-\n-#define TYPES_bh_data(S, D, T)\t\t\t\\\n- TYPES_b_data (S, D, T), \\\n- TYPES_h_data (S, D, T)\n-\n-#define TYPES_bhs_data(S, D, T)\t\t\t\\\n- TYPES_b_data (S, D, T), \\\n- TYPES_h_data (S, D, T), \\\n- TYPES_s_data (S, D, T)\n-\n-/* _s16_s8 _s32_s16 _s64_s32\n- _u16_u8 _u32_u16 _u64_u32. */\n-#define TYPES_bhs_widen(S, D, T) \\\n- D (s16, s8), D (s32, s16), D (s64, s32), \\\n- D (u16, u8), D (u32, u16), D (u64, u32)\n-\n-/* _bf16. */\n-#define TYPES_h_bfloat(S, D, T) \\\n- S (bf16)\n-\n-/* _f16. */\n-#define TYPES_h_float(S, D, T) \\\n- S (f16)\n-\n-/* _s16\n- _u16. */\n-#define TYPES_h_integer(S, D, T) \\\n- S (s16), S (u16)\n-\n-/* _bf16\n- _f16\n- _s16\n- _u16. */\n-#define TYPES_h_data(S, D, T) \\\n- S (bf16), S (f16), TYPES_h_integer (S, D, T)\n-\n-/* _s16 _s32. */\n-#define TYPES_hs_signed(S, D, T) \\\n- S (s16), S (s32)\n-\n-/* _s16 _s32\n- _u16 _u32. */\n-#define TYPES_hs_integer(S, D, T) \\\n- TYPES_hs_signed (S, D, T), S (u16), S (u32)\n-\n-/* _f16 _f32. */\n-#define TYPES_hs_float(S, D, T) \\\n- S (f16), S (f32)\n-\n-#define TYPES_hs_data(S, D, T) \\\n- TYPES_h_data (S, D, T), \\\n- TYPES_s_data (S, D, T)\n-\n-/* _u16 _u64. */\n-#define TYPES_hd_unsigned(S, D, T) \\\n- S (u16), S (u64)\n-\n-/* _s16 _s32 _s64. */\n-#define TYPES_hsd_signed(S, D, T) \\\n- S (s16), S (s32), S (s64)\n-\n-/* _s16 _s32 _s64\n- _u16 _u32 _u64. */\n-#define TYPES_hsd_integer(S, D, T) \\\n- TYPES_hsd_signed (S, D, T), S (u16), S (u32), S (u64)\n-\n-#define TYPES_hsd_data(S, D, T) \\\n- TYPES_h_data (S, D, T), \\\n- TYPES_s_data (S, D, T), \\\n- TYPES_d_data (S, D, T)\n-\n-/* _f16_mf8. */\n-#define TYPES_h_float_mf8(S, D, T) \\\n- D (f16, mf8)\n-\n-/* _f32. */\n-#define TYPES_s_float(S, D, T) \\\n- S (f32)\n-\n-/* _f32_mf8. */\n-#define TYPES_s_float_mf8(S, D, T) \\\n- D (f32, mf8)\n-\n-/* _f32\n- _s16 _s32 _s64\n- _u16 _u32 _u64. */\n-#define TYPES_s_float_hsd_integer(S, D, T) \\\n- TYPES_s_float (S, D, T), TYPES_hsd_integer (S, D, T)\n-\n-/* _f32\n- _s32 _s64\n- _u32 _u64. */\n-#define TYPES_s_float_sd_integer(S, D, T) \\\n- TYPES_s_float (S, D, T), TYPES_sd_integer (S, D, T)\n-\n-/* _s32. */\n-#define TYPES_s_signed(S, D, T) \\\n- S (s32)\n-\n-/* _u32. */\n-#define TYPES_s_unsigned(S, D, T) \\\n- S (u32)\n-\n-/* _s32\n- _u32. */\n-#define TYPES_s_integer(S, D, T) \\\n- TYPES_s_signed (S, D, T), TYPES_s_unsigned (S, D, T)\n-\n-/* _f32\n- _s32\n- _u32. */\n-#define TYPES_s_data(S, D, T) \\\n- TYPES_s_float (S, D, T), TYPES_s_integer (S, D, T)\n-\n-/* _s32 _s64. */\n-#define TYPES_sd_signed(S, D, T) \\\n- S (s32), S (s64)\n-\n-/* _u32 _u64. */\n-#define TYPES_sd_unsigned(S, D, T) \\\n- S (u32), S (u64)\n-\n-/* _s32 _s64\n- _u32 _u64. */\n-#define TYPES_sd_integer(S, D, T) \\\n- TYPES_sd_signed (S, D, T), TYPES_sd_unsigned (S, D, T)\n-\n-#define TYPES_sd_data(S, D, T) \\\n- TYPES_s_data (S, D, T), \\\n- TYPES_d_data (S, D, T)\n-\n-/* _f16 _f32 _f64\n-\t_s32 _s64\n-\t_u32 _u64. */\n-#define TYPES_all_float_and_sd_integer(S, D, T) \\\n- TYPES_all_float (S, D, T), TYPES_sd_integer (S, D, T)\n-\n-/* _f64. */\n-#define TYPES_d_float(S, D, T) \\\n- S (f64)\n-\n-/* _u64. */\n-#define TYPES_d_unsigned(S, D, T) \\\n- S (u64)\n-\n-/* _s64\n- _u64. */\n-#define TYPES_d_integer(S, D, T) \\\n- S (s64), TYPES_d_unsigned (S, D, T)\n-\n-/* _f64\n- _s64\n- _u64. 
*/\n-#define TYPES_d_data(S, D, T) \\\n- TYPES_d_float (S, D, T), TYPES_d_integer (S, D, T)\n-\n-/* All the type combinations allowed by svcvt. */\n-#define TYPES_cvt(S, D, T) \\\n- D (f16, f32), D (f16, f64), \\\n- D (f16, s16), D (f16, s32), D (f16, s64), \\\n- D (f16, u16), D (f16, u32), D (f16, u64), \\\n- \\\n- D (f32, f16), D (f32, f64), \\\n- D (f32, s32), D (f32, s64), \\\n- D (f32, u32), D (f32, u64), \\\n- \\\n- D (f64, f16), D (f64, f32), \\\n- D (f64, s32), D (f64, s64), \\\n- D (f64, u32), D (f64, u64), \\\n- \\\n- D (s16, f16), \\\n- D (s32, f16), D (s32, f32), D (s32, f64), \\\n- D (s64, f16), D (s64, f32), D (s64, f64), \\\n- \\\n- D (u16, f16), \\\n- D (u32, f16), D (u32, f32), D (u32, f64), \\\n- D (u64, f16), D (u64, f32), D (u64, f64)\n-\n-/* _bf16_f32. */\n-#define TYPES_cvt_bfloat(S, D, T) \\\n- D (bf16, f32)\n-\n-/* { _bf16 _f16 } x _f32. */\n-#define TYPES_cvt_h_s_float(S, D, T) \\\n- D (bf16, f32), D (f16, f32)\n-\n-/* _f32_f16. */\n-#define TYPES_cvt_f32_f16(S, D, T) \\\n- D (f32, f16)\n-\n-/* _f32_f16\n- _f64_f32. */\n-#define TYPES_cvt_long(S, D, T) \\\n- D (f32, f16), D (f64, f32)\n-\n-/* _f32_f64. */\n-#define TYPES_cvt_narrow_s(S, D, T) \\\n- D (f32, f64)\n-\n-/* _f16_f32\n- _f32_f64. */\n-#define TYPES_cvt_narrow(S, D, T) \\\n- D (f16, f32), TYPES_cvt_narrow_s (S, D, T)\n-\n-/* { _s32 _u32 } x _f32\n-\n- _f32 x { _s32 _u32 }. */\n-#define TYPES_cvt_s_s(S, D, T) \\\n- D (s32, f32), \\\n- D (u32, f32), \\\n- D (f32, s32), \\\n- D (f32, u32)\n-\n-/* _f16_mf8\n- _bf16_mf8. */\n-#define TYPES_cvt_mf8(S, D, T) \\\n- D (f16, mf8), D (bf16, mf8)\n-\n-/* _mf8_f16\n- _mf8_bf16. */\n-#define TYPES_cvtn_mf8(S, D, T) \\\n- D (mf8, f16), D (mf8, bf16)\n-\n-/* _mf8_f32. */\n-#define TYPES_cvtnx_mf8(S, D, T) \\\n- D (mf8, f32)\n-\n-/* { _s32 _s64 } x { _b8 _b16 _b32 _b64 }\n- { _u32 _u64 }. */\n-#define TYPES_inc_dec_n1(D, A) \\\n- D (A, b8), D (A, b16), D (A, b32), D (A, b64)\n-#define TYPES_inc_dec_n(S, D, T) \\\n- TYPES_inc_dec_n1 (D, s32), \\\n- TYPES_inc_dec_n1 (D, s64), \\\n- TYPES_inc_dec_n1 (D, u32), \\\n- TYPES_inc_dec_n1 (D, u64)\n-\n-/* { _s16 _u16 } x _s32\n-\n- { _u16 } x _u32. */\n-#define TYPES_qcvt_x2(S, D, T) \\\n- D (s16, s32), \\\n- D (u16, u32), \\\n- D (u16, s32)\n-\n-/* { _s8 _u8 } x _s32\n-\n- { _u8 } x _u32\n-\n- { _s16 _u16 } x _s64\n-\n- { _u16 } x _u64. */\n-#define TYPES_qcvt_x4(S, D, T) \\\n- D (s8, s32), \\\n- D (u8, u32), \\\n- D (u8, s32), \\\n- D (s16, s64), \\\n- D (u16, u64), \\\n- D (u16, s64)\n-\n-/* _s16_s32\n- _u16_u32. */\n-#define TYPES_qrshr_x2(S, D, T) \\\n- D (s16, s32), \\\n- D (u16, u32)\n-\n-/* _u16_s32. */\n-#define TYPES_qrshru_x2(S, D, T) \\\n- D (u16, s32)\n-\n-/* _s8_s32\n- _s16_s64\n- _u8_u32\n- _u16_u64. */\n-#define TYPES_qrshr_x4(S, D, T) \\\n- D (s8, s32), \\\n- D (s16, s64), \\\n- D (u8, u32), \\\n- D (u16, u64)\n-\n-/* _u8_s32\n- _u16_s64. */\n-#define TYPES_qrshru_x4(S, D, T) \\\n- D (u8, s32), \\\n- D (u16, s64)\n-\n-/* { _mf8 _bf16 } { _mf8 _bf16 }\n- { _f16 _f32 _f64 } { _f16 _f32 _f64 }\n- { _s8 _s16 _s32 _s64 } x { _s8 _s16 _s32 _s64 }\n- { _u8 _u16 _u32 _u64 } { _u8 _u16 _u32 _u64 }. 
*/\n-#define TYPES_reinterpret1(D, A) \\\n- D (A, mf8), \\\n- D (A, bf16), \\\n- D (A, f16), D (A, f32), D (A, f64), \\\n- D (A, s8), D (A, s16), D (A, s32), D (A, s64), \\\n- D (A, u8), D (A, u16), D (A, u32), D (A, u64)\n-#define TYPES_reinterpret(S, D, T) \\\n- TYPES_reinterpret1 (D, mf8), \\\n- TYPES_reinterpret1 (D, bf16), \\\n- TYPES_reinterpret1 (D, f16), \\\n- TYPES_reinterpret1 (D, f32), \\\n- TYPES_reinterpret1 (D, f64), \\\n- TYPES_reinterpret1 (D, s8), \\\n- TYPES_reinterpret1 (D, s16), \\\n- TYPES_reinterpret1 (D, s32), \\\n- TYPES_reinterpret1 (D, s64), \\\n- TYPES_reinterpret1 (D, u8), \\\n- TYPES_reinterpret1 (D, u16), \\\n- TYPES_reinterpret1 (D, u32), \\\n- TYPES_reinterpret1 (D, u64)\n-\n-/* _b_c\n- _c_b. */\n-#define TYPES_reinterpret_b(S, D, T) \\\n- D (b, c), \\\n- D (c, b)\n-\n-/* { _b8 _b16 _b32 _b64 } x { _s32 _s64 }\n-\t\t\t { _u32 _u64 } */\n-#define TYPES_while1(D, bn) \\\n- D (bn, s32), D (bn, s64), D (bn, u32), D (bn, u64)\n-#define TYPES_while(S, D, T) \\\n- TYPES_while1 (D, b8), \\\n- TYPES_while1 (D, b16), \\\n- TYPES_while1 (D, b32), \\\n- TYPES_while1 (D, b64)\n-\n-/* { _b8 _b16 _b32 _b64 } x { _s64 }\n-\t\t\t { _u64 } */\n-#define TYPES_while_x(S, D, T) \\\n- D (b8, s64), D (b8, u64), \\\n- D (b16, s64), D (b16, u64), \\\n- D (b32, s64), D (b32, u64), \\\n- D (b64, s64), D (b64, u64)\n-\n-/* { _c8 _c16 _c32 _c64 } x { _s64 }\n-\t\t\t { _u64 } */\n-#define TYPES_while_x_c(S, D, T) \\\n- D (c8, s64), D (c8, u64), \\\n- D (c16, s64), D (c16, u64), \\\n- D (c32, s64), D (c32, u64), \\\n- D (c64, s64), D (c64, u64)\n-\n-/* _f32_f16\n- _s32_s16\n- _u32_u16. */\n-#define TYPES_s_narrow_fsu(S, D, T) \\\n- D (f32, f16), D (s32, s16), D (u32, u16)\n-\n-/* _za8 _za16 _za32 _za64 _za128. */\n-#define TYPES_all_za(S, D, T) \\\n- S (za8), S (za16), S (za32), S (za64), S (za128)\n-\n-/* _za64. */\n-#define TYPES_d_za(S, D, T) \\\n- S (za64)\n-\n-/* { _za8 } x { _mf8 _s8 _u8 }\n-\n- { _za16 } x { _bf16 _f16 _s16 _u16 }\n-\n- { _za32 } x { _f32 _s32 _u32 }\n-\n- { _za64 } x { _f64 _s64 _u64 }. */\n-#define TYPES_za_bhsd_data(S, D, T) \\\n- D (za8, mf8), D (za8, s8), D (za8, u8), \\\n- D (za16, bf16), D (za16, f16), D (za16, s16), D (za16, u16), \\\n- D (za32, f32), D (za32, s32), D (za32, u32), \\\n- D (za64, f64), D (za64, s64), D (za64, u64)\n-\n-/* Likewise, plus:\n-\n- { _za128 } x { _bf16 }\n-\t\t{ _f16 _f32 _f64 }\n-\t\t{ _s8 _s16 _s32 _s64 }\n-\t\t{ _u8 _u16 _u32 _u64 }. */\n-\n-#define TYPES_za_all_data(S, D, T) \\\n- TYPES_za_bhsd_data (S, D, T), \\\n- TYPES_reinterpret1 (D, za128)\n-\n-/* _za16_mf8. */\n-#define TYPES_za_h_mf8(S, D, T) \\\n- D (za16, mf8)\n-\n-/* { _za_16 _za_32 } x _mf8. */\n-#define TYPES_za_hs_mf8(S, D, T) \\\n- D (za16, mf8), D (za32, mf8)\n-\n-/* _za16_bf16. */\n-#define TYPES_za_h_bfloat(S, D, T) \\\n- D (za16, bf16)\n-\n-/* _za16_f16. */\n-#define TYPES_za_h_float(S, D, T) \\\n- D (za16, f16)\n-\n-/* _za32_s8. */\n-#define TYPES_za_s_b_signed(S, D, T) \\\n- D (za32, s8)\n-\n-/* _za32_u8. */\n-#define TYPES_za_s_b_unsigned(S, D, T) \\\n- D (za32, u8)\n-\n-/* _za32 x { _s8 _u8 }. */\n-#define TYPES_za_s_b_integer(S, D, T) \\\n- D (za32, s8), D (za32, u8)\n-\n-/* _za32 x { _s16 _u16 }. */\n-#define TYPES_za_s_h_integer(S, D, T) \\\n- D (za32, s16), D (za32, u16)\n-\n-/* _za32 x { _bf16 _f16 _s16 _u16 }. */\n-#define TYPES_za_s_h_data(S, D, T) \\\n- D (za32, bf16), D (za32, f16), D (za32, s16), D (za32, u16)\n-\n-/* _za32_u32. */\n-#define TYPES_za_s_unsigned(S, D, T) \\\n- D (za32, u32)\n-\n-/* _za32 x { _s32 _u32 }. 
*/\n-#define TYPES_za_s_integer(S, D, T) \\\n- D (za32, s32), D (za32, u32)\n-\n-/* _za32_mf8. */\n-#define TYPES_za_s_mf8(S, D, T) \\\n- D (za32, mf8)\n-\n-/* _za32_f32. */\n-#define TYPES_za_s_float(S, D, T) \\\n- D (za32, f32)\n-\n-/* _za32 x { _f32 _s32 _u32 }. */\n-#define TYPES_za_s_data(S, D, T) \\\n- D (za32, f32), D (za32, s32), D (za32, u32)\n-\n-/* _za64 x { _s16 _u16 }. */\n-#define TYPES_za_d_h_integer(S, D, T) \\\n- D (za64, s16), D (za64, u16)\n-\n-/* _za64_f64. */\n-#define TYPES_za_d_float(S, D, T) \\\n- D (za64, f64)\n-\n-/* _za64 x { _s64 _u64 }. */\n-#define TYPES_za_d_integer(S, D, T) \\\n- D (za64, s64), D (za64, u64)\n-\n-/* _za32 x { _s8 _u8 _bf16 _f16 _f32 }. */\n-#define TYPES_mop_base(S, D, T) \\\n- D (za32, s8), D (za32, u8), D (za32, bf16), D (za32, f16), D (za32, f32)\n-\n-/* _za32_s8. */\n-#define TYPES_mop_base_signed(S, D, T) \\\n- D (za32, s8)\n-\n-/* _za32_u8. */\n-#define TYPES_mop_base_unsigned(S, D, T) \\\n- D (za32, u8)\n-\n-/* _za64 x { _s16 _u16 }. */\n-#define TYPES_mop_i16i64(S, D, T) \\\n- D (za64, s16), D (za64, u16)\n-\n-/* _za64_s16. */\n-#define TYPES_mop_i16i64_signed(S, D, T) \\\n- D (za64, s16)\n-\n-/* _za64_u16. */\n-#define TYPES_mop_i16i64_unsigned(S, D, T) \\\n- D (za64, u16)\n-\n-/* _za. */\n-#define TYPES_za(S, D, T) \\\n- S (za)\n-\n-/* Describe a tuple of type suffixes in which only the first is used. */\n-#define DEF_VECTOR_TYPE(X) \\\n- { TYPE_SUFFIX_ ## X, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES }\n-\n-/* Describe a tuple of type suffixes in which only the first two are used. */\n-#define DEF_DOUBLE_TYPE(X, Y) \\\n- { TYPE_SUFFIX_ ## X, TYPE_SUFFIX_ ## Y, NUM_TYPE_SUFFIXES }\n-\n-/* Describe a tuple of type suffixes in which three elements are used. */\n-#define DEF_TRIPLE_TYPE(X, Y, Z) \\\n- { TYPE_SUFFIX_ ## X, TYPE_SUFFIX_ ## Y, TYPE_SUFFIX_ ## Z }\n-\n-/* Create an array that can be used in aarch64-sve-builtins.def to\n- select the type suffixes in TYPES_<NAME>. */\n-#define DEF_SVE_TYPES_ARRAY(NAME) \\\n- static const type_suffix_triple types_##NAME[] = { \\\n- TYPES_##NAME (DEF_VECTOR_TYPE, DEF_DOUBLE_TYPE, DEF_TRIPLE_TYPE), \\\n- { NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES } \\\n- }\n-\n-/* For functions that don't take any type suffixes. */\n-static const type_suffix_triple types_none[] = {\n- { NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES },\n- { NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES, NUM_TYPE_SUFFIXES }\n-};\n-\n-/* Create an array for each TYPES_<combination> macro above. 
*/\n-DEF_SVE_TYPES_ARRAY (all_pred);\n-DEF_SVE_TYPES_ARRAY (all_count);\n-DEF_SVE_TYPES_ARRAY (all_pred_count);\n-DEF_SVE_TYPES_ARRAY (all_float);\n-DEF_SVE_TYPES_ARRAY (all_signed);\n-DEF_SVE_TYPES_ARRAY (all_float_and_signed);\n-DEF_SVE_TYPES_ARRAY (all_unsigned);\n-DEF_SVE_TYPES_ARRAY (all_integer);\n-DEF_SVE_TYPES_ARRAY (all_arith);\n-DEF_SVE_TYPES_ARRAY (all_data);\n-DEF_SVE_TYPES_ARRAY (b);\n-DEF_SVE_TYPES_ARRAY (b_unsigned);\n-DEF_SVE_TYPES_ARRAY (b_integer);\n-DEF_SVE_TYPES_ARRAY (bh_integer);\n-DEF_SVE_TYPES_ARRAY (bs_unsigned);\n-DEF_SVE_TYPES_ARRAY (bhs_signed);\n-DEF_SVE_TYPES_ARRAY (bhs_unsigned);\n-DEF_SVE_TYPES_ARRAY (bhs_integer);\n-DEF_SVE_TYPES_ARRAY (bh_data);\n-DEF_SVE_TYPES_ARRAY (bhs_data);\n-DEF_SVE_TYPES_ARRAY (bhs_widen);\n-DEF_SVE_TYPES_ARRAY (c);\n-DEF_SVE_TYPES_ARRAY (h_bfloat);\n-DEF_SVE_TYPES_ARRAY (h_float);\n-DEF_SVE_TYPES_ARRAY (h_float_mf8);\n-DEF_SVE_TYPES_ARRAY (h_integer);\n-DEF_SVE_TYPES_ARRAY (h_data);\n-DEF_SVE_TYPES_ARRAY (hs_signed);\n-DEF_SVE_TYPES_ARRAY (hs_integer);\n-DEF_SVE_TYPES_ARRAY (hs_float);\n-DEF_SVE_TYPES_ARRAY (hs_data);\n-DEF_SVE_TYPES_ARRAY (hd_unsigned);\n-DEF_SVE_TYPES_ARRAY (hsd_signed);\n-DEF_SVE_TYPES_ARRAY (hsd_integer);\n-DEF_SVE_TYPES_ARRAY (hsd_data);\n-DEF_SVE_TYPES_ARRAY (s_float);\n-DEF_SVE_TYPES_ARRAY (s_float_hsd_integer);\n-DEF_SVE_TYPES_ARRAY (s_float_mf8);\n-DEF_SVE_TYPES_ARRAY (s_float_sd_integer);\n-DEF_SVE_TYPES_ARRAY (s_signed);\n-DEF_SVE_TYPES_ARRAY (s_unsigned);\n-DEF_SVE_TYPES_ARRAY (s_integer);\n-DEF_SVE_TYPES_ARRAY (s_data);\n-DEF_SVE_TYPES_ARRAY (sd_signed);\n-DEF_SVE_TYPES_ARRAY (sd_unsigned);\n-DEF_SVE_TYPES_ARRAY (sd_integer);\n-DEF_SVE_TYPES_ARRAY (sd_data);\n-DEF_SVE_TYPES_ARRAY (all_float_and_sd_integer);\n-DEF_SVE_TYPES_ARRAY (d_float);\n-DEF_SVE_TYPES_ARRAY (d_unsigned);\n-DEF_SVE_TYPES_ARRAY (d_integer);\n-DEF_SVE_TYPES_ARRAY (d_data);\n-DEF_SVE_TYPES_ARRAY (cvt);\n-DEF_SVE_TYPES_ARRAY (cvt_bfloat);\n-DEF_SVE_TYPES_ARRAY (cvt_h_s_float);\n-DEF_SVE_TYPES_ARRAY (cvt_f32_f16);\n-DEF_SVE_TYPES_ARRAY (cvt_long);\n-DEF_SVE_TYPES_ARRAY (cvt_mf8);\n-DEF_SVE_TYPES_ARRAY (cvt_narrow_s);\n-DEF_SVE_TYPES_ARRAY (cvt_narrow);\n-DEF_SVE_TYPES_ARRAY (cvt_s_s);\n-DEF_SVE_TYPES_ARRAY (cvtn_mf8);\n-DEF_SVE_TYPES_ARRAY (cvtnx_mf8);\n-DEF_SVE_TYPES_ARRAY (inc_dec_n);\n-DEF_SVE_TYPES_ARRAY (qcvt_x2);\n-DEF_SVE_TYPES_ARRAY (qcvt_x4);\n-DEF_SVE_TYPES_ARRAY (qrshr_x2);\n-DEF_SVE_TYPES_ARRAY (qrshr_x4);\n-DEF_SVE_TYPES_ARRAY (qrshru_x2);\n-DEF_SVE_TYPES_ARRAY (qrshru_x4);\n-DEF_SVE_TYPES_ARRAY (reinterpret);\n-DEF_SVE_TYPES_ARRAY (reinterpret_b);\n-DEF_SVE_TYPES_ARRAY (while);\n-DEF_SVE_TYPES_ARRAY (while_x);\n-DEF_SVE_TYPES_ARRAY (while_x_c);\n-DEF_SVE_TYPES_ARRAY (s_narrow_fsu);\n-DEF_SVE_TYPES_ARRAY (all_za);\n-DEF_SVE_TYPES_ARRAY (d_za);\n-DEF_SVE_TYPES_ARRAY (za_bhsd_data);\n-DEF_SVE_TYPES_ARRAY (za_all_data);\n-DEF_SVE_TYPES_ARRAY (za_h_mf8);\n-DEF_SVE_TYPES_ARRAY (za_h_bfloat);\n-DEF_SVE_TYPES_ARRAY (za_h_float);\n-DEF_SVE_TYPES_ARRAY (za_s_b_signed);\n-DEF_SVE_TYPES_ARRAY (za_s_b_unsigned);\n-DEF_SVE_TYPES_ARRAY (za_s_b_integer);\n-DEF_SVE_TYPES_ARRAY (za_s_h_integer);\n-DEF_SVE_TYPES_ARRAY (za_s_h_data);\n-DEF_SVE_TYPES_ARRAY (za_s_unsigned);\n-DEF_SVE_TYPES_ARRAY (za_s_integer);\n-DEF_SVE_TYPES_ARRAY (za_s_mf8);\n-DEF_SVE_TYPES_ARRAY (za_hs_mf8);\n-DEF_SVE_TYPES_ARRAY (za_s_float);\n-DEF_SVE_TYPES_ARRAY (za_s_data);\n-DEF_SVE_TYPES_ARRAY (za_d_h_integer);\n-DEF_SVE_TYPES_ARRAY (za_d_float);\n-DEF_SVE_TYPES_ARRAY (za_d_integer);\n-DEF_SVE_TYPES_ARRAY (mop_base);\n-DEF_SVE_TYPES_ARRAY 
(mop_base_signed);\n-DEF_SVE_TYPES_ARRAY (mop_base_unsigned);\n-DEF_SVE_TYPES_ARRAY (mop_i16i64);\n-DEF_SVE_TYPES_ARRAY (mop_i16i64_signed);\n-DEF_SVE_TYPES_ARRAY (mop_i16i64_unsigned);\n-DEF_SVE_TYPES_ARRAY (za);\n-\n-static const group_suffix_index groups_none[] = {\n- GROUP_none, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_x2[] = { GROUP_x2, NUM_GROUP_SUFFIXES };\n-\n-static const group_suffix_index groups_x12[] = {\n- GROUP_none, GROUP_x2, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_x4[] = { GROUP_x4, NUM_GROUP_SUFFIXES };\n-\n-static const group_suffix_index groups_x24[] = {\n- GROUP_x2, GROUP_x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_x124[] = {\n- GROUP_none, GROUP_x2, GROUP_x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_x1234[] = {\n- GROUP_none, GROUP_x2, GROUP_x3, GROUP_x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_vg1x2[] = {\n- GROUP_vg1x2, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_vg1x4[] = {\n- GROUP_vg1x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_vg1x24[] = {\n- GROUP_vg1x2, GROUP_vg1x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_vg2[] = {\n- GROUP_vg2x1, GROUP_vg2x2, GROUP_vg2x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_vg4[] = {\n- GROUP_vg4x1, GROUP_vg4x2, GROUP_vg4x4, NUM_GROUP_SUFFIXES\n-};\n-\n-static const group_suffix_index groups_vg24[] = {\n- GROUP_vg2, GROUP_vg4, NUM_GROUP_SUFFIXES\n-};\n-\n-/* Used by functions that have no governing predicate. */\n-static const predication_index preds_none[] = { PRED_none, NUM_PREDS };\n-\n-/* Used by functions that have a governing predicate but do not have an\n- explicit suffix. */\n-static const predication_index preds_implicit[] = { PRED_implicit, NUM_PREDS };\n-\n-/* Used by functions that only support \"_m\" predication. */\n-static const predication_index preds_m[] = { PRED_m, NUM_PREDS };\n-\n-/* Used by functions that allow merging and \"don't care\" predication,\n- but are not suitable for predicated MOVPRFX. */\n-static const predication_index preds_mx[] = {\n- PRED_m, PRED_x, NUM_PREDS\n-};\n-\n-/* Used by functions that allow merging, zeroing and \"don't care\"\n- predication. */\n-static const predication_index preds_mxz[] = {\n- PRED_m, PRED_x, PRED_z, NUM_PREDS\n-};\n-\n-/* Used by functions that have the mxz predicated forms above, and in addition\n- have an unpredicated form. */\n-static const predication_index preds_mxz_or_none[] = {\n- PRED_m, PRED_x, PRED_z, PRED_none, NUM_PREDS\n-};\n-\n-/* Used by functions that allow merging and zeroing predication but have\n- no \"_x\" form. */\n-static const predication_index preds_mz[] = { PRED_m, PRED_z, NUM_PREDS };\n-\n-/* Used by functions that have an unpredicated form and a _z predicated\n- form. */\n-static const predication_index preds_z_or_none[] = {\n- PRED_z, PRED_none, NUM_PREDS\n-};\n-\n-/* Used by (mostly predicate) functions that only support \"_z\" predication. */\n-static const predication_index preds_z[] = { PRED_z, NUM_PREDS };\n-\n-/* Used by SME instructions that always merge into ZA. */\n-static const predication_index preds_za_m[] = { PRED_za_m, NUM_PREDS };\n-\n-#define NONSTREAMING_SVE(X) nonstreaming_only (AARCH64_FL_SVE | (X))\n-#define SVE_AND_SME(X, Y) streaming_compatible (AARCH64_FL_SVE | (X), (Y))\n-#define SSVE(X) SVE_AND_SME (X, X)\n-\n /* A list of all arm_sve.h functions. 
*/\n static CONSTEXPR const function_group_info function_groups[] = {\n #define DEF_SVE_FUNCTION_GS_FPM(NAME, SHAPE, TYPES, GROUPS, PREDS, FPM_MODE) \\\n@@ -1346,6 +539,9 @@ function_builder::function_builder (handle_pragma_index pragma_index,\n m_function_nulls = function_nulls;\n \n gcc_obstack_init (&m_string_obstack);\n+\n+ if (!function_table)\n+ function_table = hash_table<registered_function_hasher>::create_ggc (1023);\n }\n \n function_builder::~function_builder ()\n@@ -3859,9 +3055,9 @@ gimple_folder::fold_to_stmt_vops (gimple *g)\n gimple *\n gimple_folder::fold ()\n {\n- /* Don't fold anything when SVE is disabled; emit an error during\n+ /* Don't fold anything when NEON/SVE are disabled; emit an error during\n expansion instead. */\n- if (!TARGET_SVE)\n+ if (!TARGET_SIMD && !TARGET_SVE)\n return NULL;\n \n /* Punt if the function has a return type and no result location is\n@@ -4772,6 +3968,7 @@ init_builtins ()\n register_builtin_types ();\n if (in_lto_p)\n {\n+ aarch64_acle::handle_arm_neon_h (false);\n handle_arm_sve_h (false);\n handle_arm_sme_h (false);\n handle_arm_neon_sve_bridge_h (false);\n@@ -4874,13 +4071,27 @@ register_svprfop ()\n \t\t\t\t\t\t \"svprfop\", &values);\n }\n \n+static bool arm_sve_h_handled = false;\n+static location_t arm_sve_h_location;\n+\n+static bool arm_sme_h_handled = false;\n+static location_t arm_sme_h_location;\n+\n+static bool arm_neon_sve_bridge_h_handled = false;\n+static location_t arm_neon_sve_bridge_h_location;\n+\n /* Implement #pragma GCC aarch64 \"arm_sve.h\". */\n void\n handle_arm_sve_h (bool function_nulls_p)\n {\n- if (function_table)\n+ if (!aarch64_acle::arm_neon_h_handled)\n+ aarch64_acle::handle_arm_neon_h (false);\n+\n+ if (arm_sve_h_handled)\n {\n error (\"duplicate definition of %qs\", \"arm_sve.h\");\n+ inform (arm_sve_h_location, \"previous definition of %qs here\",\n+\t \"arm_sve.h\");\n return;\n }\n \n@@ -4903,25 +4114,38 @@ handle_arm_sve_h (bool function_nulls_p)\n register_svprfop ();\n \n /* Define the functions. */\n- function_table = hash_table<registered_function_hasher>::create_ggc (1023);\n function_builder builder (arm_sve_handle, function_nulls_p);\n for (unsigned int i = 0; i < ARRAY_SIZE (function_groups); ++i)\n builder.register_function_group (function_groups[i]);\n+\n+ arm_sve_h_handled = true;\n+ arm_sve_h_location = input_location;\n }\n \n /* Implement #pragma GCC aarch64 \"arm_neon_sve_bridge.h\". */\n void\n handle_arm_neon_sve_bridge_h (bool function_nulls_p)\n {\n- if (initial_indexes[arm_sme_handle] == 0)\n+ if (!arm_sme_h_handled)\n handle_arm_sme_h (true);\n \n+ if (arm_neon_sve_bridge_h_handled)\n+ {\n+ error (\"duplicate definition of %qs\", \"arm_neon_sve_bridge.h\");\n+ inform (arm_neon_sve_bridge_h_location, \"previous definition of %qs here\",\n+\t \"arm_neon_sve_bridge.h\");\n+ return;\n+ }\n+\n aarch64_target_switcher switcher;\n \n /* Define the functions. 
*/\n function_builder builder (arm_neon_sve_handle, function_nulls_p);\n for (unsigned int i = 0; i < ARRAY_SIZE (neon_sve_function_groups); ++i)\n builder.register_function_group (neon_sve_function_groups[i]);\n+\n+ arm_neon_sve_bridge_h_handled = true;\n+ arm_neon_sve_bridge_h_location = input_location;\n }\n \n /* Return the function decl with SVE function subcode CODE, or error_mark_node\n@@ -4938,7 +4162,7 @@ builtin_decl (unsigned int code, bool)\n void\n handle_arm_sme_h (bool function_nulls_p)\n {\n- if (!function_table)\n+ if (!arm_sve_h_handled)\n {\n error (\"%qs defined without first defining %qs\",\n \t \"arm_sme.h\", \"arm_sve.h\");\n@@ -4950,6 +4174,9 @@ handle_arm_sme_h (bool function_nulls_p)\n function_builder builder (arm_sme_handle, function_nulls_p);\n for (unsigned int i = 0; i < ARRAY_SIZE (sme_function_groups); ++i)\n builder.register_function_group (sme_function_groups[i]);\n+\n+ arm_sme_h_handled = true;\n+ arm_sme_h_location = input_location;\n }\n \n /* If we're implementing manual overloading, check whether the SVE\ndiff --git a/gcc/config/aarch64/aarch64-sve-builtins.def b/gcc/config/aarch64/aarch64-sve-builtins.def\nindex 6ad257643b69..6df8d41d7f63 100644\n--- a/gcc/config/aarch64/aarch64-sve-builtins.def\n+++ b/gcc/config/aarch64/aarch64-sve-builtins.def\n@@ -26,6 +26,8 @@\n \n - aarch64-sve-builtins.def for common data types, groups, and other\n supporting definitions used across all files.\n+ - aarch64-neon-builtins-base.def for any AdvSIMD intrinsic that is\n+ enabled by the +simd extension.\n - aarch64-sve-builtins-base.def for the baseline SVE intrinsics which predate\n SVE2 and SME.\n - aarch64-sve-builtins-sve2.def for any scalable SIMD intrinsic that is\n@@ -159,6 +161,15 @@ DEF_SVE_NEON_TYPE_SUFFIX (u32, svuint32_t, unsigned, 32, VNx4SImode,\n DEF_SVE_NEON_TYPE_SUFFIX (u64, svuint64_t, unsigned, 64, VNx2DImode,\n \t\t\t Uint64x1_t, Uint64x2_t)\n \n+DEF_SVE_NEON_TYPE_SUFFIX (p8, svuint8_t, poly, 8, VNx16QImode,\n+\t\t\t Poly8x8_t, Poly8x16_t)\n+DEF_SVE_NEON_TYPE_SUFFIX (p16, svuint16_t, poly, 16, VNx8HImode,\n+\t\t\t Poly16x4_t, Poly16x8_t)\n+DEF_SVE_NEON_TYPE_SUFFIX (p64, svuint64_t, poly, 64, VNx2DImode,\n+\t\t\t Poly64x1_t, Poly64x2_t)\n+DEF_SVE_NEON_TYPE_SUFFIX (p128, svuint64_t, poly, 128, TImode,\n+\t\t\t Poly128_t, Poly128_t)\n+\n /* Associate _za with bytes. This is needed for svldr_vnum_za and\n svstr_vnum_za, whose ZA offset can be in the range [0, 15], as for za8. 
*/\n DEF_SME_ZA_SUFFIX (za, 8, VNx16QImode)\ndiff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h\nindex 82cf94b51739..b5acb0c9321e 100644\n--- a/gcc/config/aarch64/arm_neon.h\n+++ b/gcc/config/aarch64/arm_neon.h\n@@ -193,147 +193,6 @@\n __vec;\t\t\t\t\t\t\t\t\\\n })\n \n-/* vadd */\n-__extension__ extern __inline int8x8_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_s8 (int8x8_t __a, int8x8_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int16x4_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_s16 (int16x4_t __a, int16x4_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int32x2_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_s32 (int32x2_t __a, int32x2_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline float32x2_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_f32 (float32x2_t __a, float32x2_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline float64x1_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_f64 (float64x1_t __a, float64x1_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint8x8_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_u8 (uint8x8_t __a, uint8x8_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint16x4_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_u16 (uint16x4_t __a, uint16x4_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint32x2_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_u32 (uint32x2_t __a, uint32x2_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int64x1_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_s64 (int64x1_t __a, int64x1_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint64x1_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_u64 (uint64x1_t __a, uint64x1_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int8x16_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_s8 (int8x16_t __a, int8x16_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int16x8_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_s16 (int16x8_t __a, int16x8_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int32x4_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_s32 (int32x4_t __a, int32x4_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline int64x2_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_s64 (int64x2_t __a, int64x2_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline float32x4_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_f32 (float32x4_t __a, float32x4_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline float64x2_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_f64 (float64x2_t __a, float64x2_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint8x16_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_u8 (uint8x16_t __a, uint8x16_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline 
uint16x8_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_u16 (uint16x8_t __a, uint16x8_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint32x4_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_u32 (uint32x4_t __a, uint32x4_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline uint64x2_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_u64 (uint64x2_t __a, uint64x2_t __b)\n-{\n- return __a + __b;\n-}\n-\n __extension__ extern __inline int16x8_t\n __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n vaddl_s8 (int8x8_t __a, int8x8_t __b)\n@@ -25904,20 +25763,6 @@ vsqrtq_f16 (float16x8_t __a)\n \n /* ARMv8.2-A FP16 two operands vector intrinsics. */\n \n-__extension__ extern __inline float16x4_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_f16 (float16x4_t __a, float16x4_t __b)\n-{\n- return __a + __b;\n-}\n-\n-__extension__ extern __inline float16x8_t\n-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_f16 (float16x8_t __a, float16x8_t __b)\n-{\n- return __a + __b;\n-}\n-\n __extension__ extern __inline float16x4_t\n __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))\n vabd_f16 (float16x4_t __a, float16x4_t __b)\n@@ -28526,55 +28371,6 @@ vusmmlaq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)\n \n #pragma GCC pop_options\n \n-__extension__ extern __inline poly8x8_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_p8 (poly8x8_t __a, poly8x8_t __b)\n-{\n- return __a ^ __b;\n-}\n-\n-__extension__ extern __inline poly16x4_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_p16 (poly16x4_t __a, poly16x4_t __b)\n-{\n- return __a ^ __b;\n-}\n-\n-__extension__ extern __inline poly64x1_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vadd_p64 (poly64x1_t __a, poly64x1_t __b)\n-{\n- return __a ^ __b;\n-}\n-\n-__extension__ extern __inline poly8x16_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_p8 (poly8x16_t __a, poly8x16_t __b)\n-{\n- return __a ^ __b;\n-}\n-\n-__extension__ extern __inline poly16x8_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_p16 (poly16x8_t __a, poly16x8_t __b)\n-{\n- return __a ^__b;\n-}\n-\n-__extension__ extern __inline poly64x2_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_p64 (poly64x2_t __a, poly64x2_t __b)\n-{\n- return __a ^ __b;\n-}\n-\n-__extension__ extern __inline poly128_t\n-__attribute ((__always_inline__, __gnu_inline__, __artificial__))\n-vaddq_p128 (poly128_t __a, poly128_t __b)\n-{\n- return __a ^ __b;\n-}\n-\n #undef __aarch64_vget_lane_any\n \n #undef __aarch64_vdup_lane_any\ndiff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64\nindex 1171d2023490..66b302192ada 100644\n--- a/gcc/config/aarch64/t-aarch64\n+++ b/gcc/config/aarch64/t-aarch64\n@@ -67,6 +67,52 @@ aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.cc $(CONFIG_H) \\\n \t$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \\\n \t\t$(srcdir)/config/aarch64/aarch64-builtins.cc\n \n+aarch64-neon-builtins.o: \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.cc \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.def \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-base.def \\\n+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \\\n+ $(TM_P_H) memmodel.h 
insn-codes.h $(OPTABS_H) $(RECOG_H) $(DIAGNOSTIC_H) \\\n+ $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) fold-const.h $(GIMPLE_H) \\\n+ gimple-iterator.h gimplify.h explow.h $(EMIT_RTL_H) tree-vector-builder.h \\\n+ stor-layout.h alias.h gimple-fold.h langhooks.h \\\n+ stringpool.h \\\n+ $(srcdir)/config/aarch64/aarch64-acle-builtins.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-shapes.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-base.h\n+\t$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \\\n+\t\t$(srcdir)/config/aarch64/aarch64-neon-builtins.cc\n+\n+aarch64-neon-builtins-shapes.o: \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-shapes.cc \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.def \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-base.def \\\n+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \\\n+ $(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) \\\n+ $(srcdir)/config/aarch64/aarch64-acle-builtins.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-shapes.h\n+\t$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \\\n+\t\t$(srcdir)/config/aarch64/aarch64-neon-builtins-shapes.cc\n+\n+aarch64-neon-builtins-base.o: \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-base.cc \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.def \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-base.def \\\n+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \\\n+ $(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) $(RECOG_H) \\\n+ $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) fold-const.h $(GIMPLE_H) \\\n+ gimple-iterator.h gimplify.h explow.h $(EMIT_RTL_H) tree-vector-builder.h \\\n+ rtx-vector-builder.h vec-perm-indices.h \\\n+ $(srcdir)/config/aarch64/aarch64-acle-builtins.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-shapes.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-base.h \\\n+ $(srcdir)/config/aarch64/aarch64-neon-builtins-functions.h\n+\t$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \\\n+\t\t$(srcdir)/config/aarch64/aarch64-neon-builtins-base.cc\n+\n aarch64-sve-builtins.o: $(srcdir)/config/aarch64/aarch64-sve-builtins.cc \\\n $(srcdir)/config/aarch64/aarch64-sve-builtins.def \\\n $(srcdir)/config/aarch64/aarch64-sve-builtins-base.def \\\ndiff --git a/gcc/testsuite/g++.target/aarch64/pr103147-6.C b/gcc/testsuite/g++.target/aarch64/pr103147-6.C\nindex 15a606f976c8..bbea67b9b7db 100644\n--- a/gcc/testsuite/g++.target/aarch64/pr103147-6.C\n+++ b/gcc/testsuite/g++.target/aarch64/pr103147-6.C\n@@ -1,3 +1,4 @@\n /* { dg-options \"-mgeneral-regs-only\" } */\n+/* { dg-excess-errors \"arm_neon.h\" } */\n \n #include <arm_neon.h>\ndiff --git a/gcc/testsuite/g++.target/aarch64/pr117048.C b/gcc/testsuite/g++.target/aarch64/pr117048.C\nindex ae46e5875e4c..a9775700c5bf 100644\n--- a/gcc/testsuite/g++.target/aarch64/pr117048.C\n+++ b/gcc/testsuite/g++.target/aarch64/pr117048.C\n@@ -30,5 +30,5 @@ void G(\n v[12] = vgetq_lane_s64(vd01, 0);\n }\n \n-/* { dg-final { scan-assembler {\\txar\\tv[0-9]+\\.2d, v[0-9]+\\.2d, v[0-9]+\\.2d, 32\\n} } } */\n+/* { dg-final { scan-assembler {\\txar\\tv[0-9]+\\.2d, v[0-9]+\\.2d, v[0-9]+\\.2d, #?32\\n} } } */\n \ndiff --git a/gcc/testsuite/gcc.target/aarch64/neon/aarch64-neon.exp b/gcc/testsuite/gcc.target/aarch64/neon/aarch64-neon.exp\nnew file mode 100644\nindex 000000000000..03c4467e5354\n--- 
/dev/null\n+++ b/gcc/testsuite/gcc.target/aarch64/neon/aarch64-neon.exp\n@@ -0,0 +1,39 @@\n+# Specific regression driver for AArch64 NEON.\n+# Copyright (C) 2026-2026 Free Software Foundation, Inc.\n+# Contributed by ARM Ltd.\n+#\n+# This file is part of GCC.\n+#\n+# GCC is free software; you can redistribute it and/or modify it\n+# under the terms of the GNU General Public License as published by\n+# the Free Software Foundation; either version 3, or (at your option)\n+# any later version.\n+#\n+# GCC is distributed in the hope that it will be useful, but\n+# WITHOUT ANY WARRANTY; without even the implied warranty of\n+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n+# General Public License for more details.\n+#\n+# You should have received a copy of the GNU General Public License\n+# along with GCC; see the file COPYING3. If not see\n+# <http://www.gnu.org/licenses/>.\n+\n+# GCC testsuite that uses the `dg.exp' driver.\n+\n+# Exit immediately if this isn't an AArch64 target.\n+if {![istarget aarch64*-*-*] } then {\n+ return\n+}\n+\n+# Load support procs.\n+load_lib gcc-dg.exp\n+\n+# Initialize `dg'.\n+dg-init\n+\n+# Main loop.\n+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*\\[cCs\\]]] \\\n+\t\" -ansi -pedantic-errors -std=c23 -O3 -march=armv8-a+simd\" \"\"\n+\n+# All done.\n+dg-finish\ndiff --git a/gcc/testsuite/gcc.target/aarch64/neon/arm_neon_test.h b/gcc/testsuite/gcc.target/aarch64/neon/arm_neon_test.h\nnew file mode 100644\nindex 000000000000..7d9371e01047\n--- /dev/null\n+++ b/gcc/testsuite/gcc.target/aarch64/neon/arm_neon_test.h\n@@ -0,0 +1,22 @@\n+#include \"arm_neon.h\"\n+\n+#pragma GCC target \"+simd+fp16+bf16+sha3\"\n+\n+#define TEST_UNARY(NAME, RET_TYPE, ARG_1_TYPE) \\\n+ RET_TYPE test_##NAME (ARG_1_TYPE a) { return NAME (a); }\n+\n+#define TEST_UNIFORM_UNARY(NAME, TYPE) TEST_UNARY (NAME, TYPE, TYPE)\n+\n+#define TEST_BINARY(NAME, RET_TYPE, ARG_1_TYPE, ARG_2_TYPE) \\\n+ RET_TYPE test_##NAME (ARG_1_TYPE a, ARG_2_TYPE b) { return NAME (a, b); }\n+\n+#define TEST_UNIFORM_BINARY(NAME, TYPE) TEST_BINARY (NAME, TYPE, TYPE, TYPE)\n+\n+#define TEST_TERNARY(NAME, RET_TYPE, ARG_1_TYPE, ARG_2_TYPE, ARG_3_TYPE) \\\n+ RET_TYPE test_##NAME (ARG_1_TYPE a, ARG_2_TYPE b, ARG_3_TYPE c) \\\n+ { \\\n+ return NAME (a, b, c); \\\n+ }\n+\n+#define TEST_UNIFORM_TERNARY(NAME, TYPE) \\\n+ TEST_TERNARY (NAME, TYPE, TYPE, TYPE, TYPE)\ndiff --git a/gcc/testsuite/gcc.target/aarch64/neon/vadd.c b/gcc/testsuite/gcc.target/aarch64/neon/vadd.c\nnew file mode 100644\nindex 000000000000..e622718685db\n--- /dev/null\n+++ b/gcc/testsuite/gcc.target/aarch64/neon/vadd.c\n@@ -0,0 +1,203 @@\n+/* { dg-do compile } */\n+/* { dg-final { check-function-bodies \"**\" \"\" } } */\n+\n+#include \"arm_neon_test.h\"\n+\n+/*\n+** test_vadd_u8:\n+** add\tv0\\.8b, (v0\\.8b, v1\\.8b|v1\\.8b, v0\\.8b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_u8, uint8x8_t)\n+\n+/*\n+** test_vadd_s8:\n+** add\tv0\\.8b, (v0\\.8b, v1\\.8b|v1\\.8b, v0\\.8b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_s8, int8x8_t)\n+\n+/*\n+** test_vadd_p8:\n+** eor\tv0\\.8b, (v0\\.8b, v1\\.8b|v1\\.8b, v0\\.8b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_p8, poly8x8_t)\n+\n+/*\n+** test_vadd_u16:\n+** add\tv0\\.4h, (v0\\.4h, v1\\.4h|v1\\.4h, v0\\.4h)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_u16, uint16x4_t)\n+\n+/*\n+** test_vadd_s16:\n+** add\tv0\\.4h, (v0\\.4h, v1\\.4h|v1\\.4h, v0\\.4h)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_s16, int16x4_t)\n+\n+/*\n+** test_vadd_p16:\n+** eor\tv0\\.8b, (v0\\.8b, v1\\.8b|v1\\.8b, v0\\.8b)\n+** 
ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_p16, poly16x4_t)\n+\n+/*\n+** test_vadd_u32:\n+** add\tv0\\.2s, (v0\\.2s, v1\\.2s|v1\\.2s, v0\\.2s)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_u32, uint32x2_t)\n+\n+/*\n+** test_vadd_s32:\n+** add\tv0\\.2s, (v0\\.2s, v1\\.2s|v1\\.2s, v0\\.2s)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_s32, int32x2_t)\n+\n+/*\n+** test_vadd_u64:\n+** add\td0, (d0, d1|d1, d0)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_u64, uint64x1_t)\n+\n+/*\n+** test_vadd_s64:\n+** add\td0, (d0, d1|d1, d0)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_s64, int64x1_t)\n+\n+/*\n+** test_vadd_p64:\n+** eor\tv0\\.8b, (v0\\.8b, v1\\.8b|v1\\.8b, v0\\.8b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vadd_p64, poly64x1_t)\n+\n+/*\n+** test_vaddq_u8:\n+** add\tv0\\.16b, (v0\\.16b, v1\\.16b|v1\\.16b, v0\\.16b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_u8, uint8x16_t)\n+\n+/*\n+** test_vaddq_s8:\n+** add\tv0\\.16b, (v0\\.16b, v1\\.16b|v1\\.16b, v0\\.16b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_s8, int8x16_t)\n+\n+/*\n+** test_vaddq_p8:\n+** eor\tv0\\.16b, (v0\\.16b, v1\\.16b|v1\\.16b, v0\\.16b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_p8, poly8x16_t)\n+\n+/*\n+** test_vaddq_u16:\n+** add\tv0\\.8h, (v0\\.8h, v1\\.8h|v1\\.8h, v0\\.8h)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_u16, uint16x8_t)\n+\n+/*\n+** test_vaddq_s16:\n+** add\tv0\\.8h, (v0\\.8h, v1\\.8h|v1\\.8h, v0\\.8h)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_s16, int16x8_t)\n+\n+/*\n+** test_vaddq_f16:\n+** fadd\tv0\\.8h, (v0\\.8h, v1\\.8h|v1\\.8h, v0\\.8h)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_f16, float16x8_t)\n+\n+/*\n+** test_vaddq_p16:\n+** eor\tv0\\.16b, (v0\\.16b, v1\\.16b|v1\\.16b, v0\\.16b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_p16, poly16x8_t)\n+\n+/*\n+** test_vaddq_u32:\n+** add\tv0\\.4s, (v0\\.4s, v1\\.4s|v1\\.4s, v0\\.4s)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_u32, uint32x4_t)\n+\n+/*\n+** test_vaddq_s32:\n+** add\tv0\\.4s, (v0\\.4s, v1\\.4s|v1\\.4s, v0\\.4s)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_s32, int32x4_t)\n+\n+/*\n+** test_vaddq_f32:\n+** fadd\tv0\\.4s, (v0\\.4s, v1\\.4s|v1\\.4s, v0\\.4s)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_f32, float32x4_t)\n+\n+/*\n+** test_vaddq_u64:\n+** add\tv0\\.2d, (v0\\.2d, v1\\.2d|v1\\.2d, v0\\.2d)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_u64, uint64x2_t)\n+\n+/*\n+** test_vaddq_s64:\n+** add\tv0\\.2d, (v0\\.2d, v1\\.2d|v1\\.2d, v0\\.2d)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_s64, int64x2_t)\n+\n+/*\n+** test_vaddq_f64:\n+** fadd\tv0\\.2d, (v0\\.2d, v1\\.2d|v1\\.2d, v0\\.2d)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_f64, float64x2_t)\n+\n+/*\n+** test_vaddq_p64:\n+** eor\tv0\\.16b, (v0\\.16b, v1\\.16b|v1\\.16b, v0\\.16b)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_p64, poly64x2_t)\n+\n+/* `poly128_t` is a scalar type, like `__uint128_t`, so it is passed in two GPR\n+ registers. 
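The expected code below therefore XORs the two 64-bit halves with two `eor`s on general registers rather than using a single vector `eor`.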
*/\n+/*\n+** test_vaddq_p128:\n+** eor\tx[0-9], x[0-9]+, x[0-9]+\n+** eor\tx[0-9], x[0-9]+, x[0-9]+\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddq_p128, poly128_t)\n+\n+/*\n+** test_vaddd_u64:\n+** add\tx0, (x0, x1|x1, x0)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddd_u64, uint64_t)\n+\n+/*\n+** test_vaddd_s64:\n+** add\tx0, (x0, x1|x1, x0)\n+** ret\n+*/\n+TEST_UNIFORM_BINARY (vaddd_s64, int64_t)\ndiff --git a/gcc/testsuite/gcc.target/aarch64/pr103147-6.c b/gcc/testsuite/gcc.target/aarch64/pr103147-6.c\nindex 15a606f976c8..bbea67b9b7db 100644\n--- a/gcc/testsuite/gcc.target/aarch64/pr103147-6.c\n+++ b/gcc/testsuite/gcc.target/aarch64/pr103147-6.c\n@@ -1,3 +1,4 @@\n /* { dg-options \"-mgeneral-regs-only\" } */\n+/* { dg-excess-errors \"arm_neon.h\" } */\n \n #include <arm_neon.h>\n", "prefixes": [ "v1", "2/6" ] }