From patchwork Fri Aug 30 14:03:37 2013
X-Patchwork-Submitter: Tejas Belagod
X-Patchwork-Id: 271296
Message-ID: <5220A639.3060704@arm.com>
Date: Fri, 30 Aug 2013 15:03:37 +0100
From: Tejas Belagod
To: "gcc-patches@gcc.gnu.org"
Subject: [Patch, AArch64] Remove arm_neon.h's dependency on stdint's macros.

Hi,

The attached patch removes arm_neon.h's dependency on stdint.h's macros
UINT64_C () and INT64_C (), making arm_neon.h more C++-friendly.

Tested on aarch64-none-elf. OK for trunk?

Thanks,
Tejas Belagod.
ARM.

Changelog:

2013-08-30  Tejas Belagod

gcc/
	* config/aarch64/arm_neon.h (__AARCH64_UINT64_C, __AARCH64_INT64_C):
	New internal macros to specify 64-bit constants; avoid using
	stdint.h's macros.
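For background (illustrative, not part of the patch): in C++, stdint.h
follows the C99 convention of providing UINT64_C ()/INT64_C () only when
__STDC_CONSTANT_MACROS is defined before inclusion, so a C++ translation
unit that includes arm_neon.h without that define can fail to compile. A
minimal sketch of the failure mode the new internal macros avoid (file and
function names below are made up for illustration):

/* sketch.cpp -- compile as C++ for an AArch64 target, e.g. g++ -c sketch.cpp.
   Before this patch, this fails with "UINT64_C was not declared" unless
   __STDC_CONSTANT_MACROS is defined before including <arm_neon.h>.  */
#include <arm_neon.h>

int8x16_t
narrow_high (int8x8_t lo, int16x8_t hi)
{
  /* vmovn_high_s16 builds its zero upper half internally; after this patch
     it uses vcreate_s8 (__AARCH64_UINT64_C (0x0)), a plain cast that works
     in both C and C++, instead of vcreate_s8 (UINT64_C (0x0)).  */
  return vmovn_high_s16 (lo, hi);
}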
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 13ef11d..6e8d8cd 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -29,6 +29,9 @@
 
 #include <stdint.h>
 
+#define __AARCH64_UINT64_C(__C) ((uint64_t) __C)
+#define __AARCH64_INT64_C(__C) ((int64_t) __C)
+
 typedef __builtin_aarch64_simd_qi int8x8_t
   __attribute__ ((__vector_size__ (8)));
 typedef __builtin_aarch64_simd_hi int16x4_t
@@ -9776,7 +9779,7 @@ vmovl_u32 (uint32x2_t a)
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vmovn_high_s16 (int8x8_t a, int16x8_t b)
 {
-  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("xtn2 %0.16b,%1.8h"
            : "+w"(result)
            : "w"(b)
@@ -9787,7 +9790,7 @@ vmovn_high_s16 (int8x8_t a, int16x8_t b)
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vmovn_high_s32 (int16x4_t a, int32x4_t b)
 {
-  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("xtn2 %0.8h,%1.4s"
            : "+w"(result)
            : "w"(b)
@@ -9798,7 +9801,7 @@ vmovn_high_s32 (int16x4_t a, int32x4_t b)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vmovn_high_s64 (int32x2_t a, int64x2_t b)
 {
-  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("xtn2 %0.4s,%1.2d"
            : "+w"(result)
            : "w"(b)
@@ -9809,7 +9812,7 @@ vmovn_high_s64 (int32x2_t a, int64x2_t b)
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
 {
-  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("xtn2 %0.16b,%1.8h"
            : "+w"(result)
            : "w"(b)
@@ -9820,7 +9823,7 @@ vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
 {
-  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("xtn2 %0.8h,%1.4s"
            : "+w"(result)
            : "w"(b)
@@ -9831,7 +9834,7 @@ vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
 {
-  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("xtn2 %0.4s,%1.2d"
            : "+w"(result)
            : "w"(b)
@@ -12250,7 +12253,7 @@ vqdmulhq_n_s32 (int32x4_t a, int32_t b)
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vqmovn_high_s16 (int8x8_t a, int16x8_t b)
 {
-  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("sqxtn2 %0.16b, %1.8h"
            : "+w"(result)
            : "w"(b)
@@ -12261,7 +12264,7 @@ vqmovn_high_s16 (int8x8_t a, int16x8_t b)
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vqmovn_high_s32 (int16x4_t a, int32x4_t b)
 {
-  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("sqxtn2 %0.8h, %1.4s"
            : "+w"(result)
            : "w"(b)
@@ -12272,7 +12275,7 @@ vqmovn_high_s32 (int16x4_t a, int32x4_t b)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vqmovn_high_s64 (int32x2_t a, int64x2_t b)
 {
-  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("sqxtn2 %0.4s, %1.2d"
            : "+w"(result)
            : "w"(b)
@@ -12283,7 +12286,7 @@ vqmovn_high_s64 (int32x2_t a, int64x2_t b)
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
 {
-  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("uqxtn2 %0.16b, %1.8h"
            : "+w"(result)
            : "w"(b)
@@ -12294,7 +12297,7 @@ vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
 {
-  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("uqxtn2 %0.8h, %1.4s"
            : "+w"(result)
            : "w"(b)
@@ -12305,7 +12308,7 @@ vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
 {
-  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("uqxtn2 %0.4s, %1.2d"
            : "+w"(result)
            : "w"(b)
@@ -12316,7 +12319,7 @@ vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
 {
-  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("sqxtun2 %0.16b, %1.8h"
            : "+w"(result)
            : "w"(b)
@@ -12327,7 +12330,7 @@ vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
 {
-  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("sqxtun2 %0.8h, %1.4s"
            : "+w"(result)
            : "w"(b)
@@ -12338,7 +12341,7 @@ vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
 {
-  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("sqxtun2 %0.4s, %1.2d"
            : "+w"(result)
            : "w"(b)
@@ -12396,7 +12399,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int16x8_t b_ = (b);                                              \
        int8x8_t a_ = (a);                                               \
        int8x16_t result = vcombine_s8                                   \
-                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+                            (a_, vcreate_s8                             \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("sqrshrn2 %0.16b, %1.8h, #%2"                           \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12410,7 +12414,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int32x4_t b_ = (b);                                              \
        int16x4_t a_ = (a);                                              \
        int16x8_t result = vcombine_s16                                  \
-                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s16                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("sqrshrn2 %0.8h, %1.4s, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12424,7 +12429,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int64x2_t b_ = (b);                                              \
        int32x2_t a_ = (a);                                              \
        int32x4_t result = vcombine_s32                                  \
-                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s32                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("sqrshrn2 %0.4s, %1.2d, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12438,7 +12444,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        uint16x8_t b_ = (b);                                             \
        uint8x8_t a_ = (a);                                              \
        uint8x16_t result = vcombine_u8                                  \
-                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+                             (a_, vcreate_u8                            \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("uqrshrn2 %0.16b, %1.8h, #%2"                           \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12452,7 +12459,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        uint32x4_t b_ = (b);                                             \
        uint16x4_t a_ = (a);                                             \
        uint16x8_t result = vcombine_u16                                 \
-                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u16                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("uqrshrn2 %0.8h, %1.4s, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12466,7 +12474,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        uint64x2_t b_ = (b);                                             \
        uint32x2_t a_ = (a);                                             \
        uint32x4_t result = vcombine_u32                                 \
-                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u32                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("uqrshrn2 %0.4s, %1.2d, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12480,7 +12489,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int16x8_t b_ = (b);                                              \
        uint8x8_t a_ = (a);                                              \
        uint8x16_t result = vcombine_u8                                  \
-                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+                             (a_, vcreate_u8                            \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("sqrshrun2 %0.16b, %1.8h, #%2"                          \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12494,7 +12504,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int32x4_t b_ = (b);                                              \
        uint16x4_t a_ = (a);                                             \
        uint16x8_t result = vcombine_u16                                 \
-                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u16                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("sqrshrun2 %0.8h, %1.4s, #%2"                           \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12508,7 +12519,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int64x2_t b_ = (b);                                              \
        uint32x2_t a_ = (a);                                             \
        uint32x4_t result = vcombine_u32                                 \
-                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u32                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("sqrshrun2 %0.4s, %1.2d, #%2"                           \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12522,7 +12534,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int16x8_t b_ = (b);                                              \
        int8x8_t a_ = (a);                                               \
        int8x16_t result = vcombine_s8                                   \
-                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+                            (a_, vcreate_s8                             \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("sqshrn2 %0.16b, %1.8h, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12536,7 +12549,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int32x4_t b_ = (b);                                              \
        int16x4_t a_ = (a);                                              \
        int16x8_t result = vcombine_s16                                  \
-                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s16                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("sqshrn2 %0.8h, %1.4s, #%2"                             \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12550,7 +12564,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int64x2_t b_ = (b);                                              \
        int32x2_t a_ = (a);                                              \
        int32x4_t result = vcombine_s32                                  \
-                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s32                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("sqshrn2 %0.4s, %1.2d, #%2"                             \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12564,7 +12579,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        uint16x8_t b_ = (b);                                             \
        uint8x8_t a_ = (a);                                              \
        uint8x16_t result = vcombine_u8                                  \
-                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+                             (a_, vcreate_u8                            \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("uqshrn2 %0.16b, %1.8h, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12578,7 +12594,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        uint32x4_t b_ = (b);                                             \
        uint16x4_t a_ = (a);                                             \
        uint16x8_t result = vcombine_u16                                 \
-                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u16                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("uqshrn2 %0.8h, %1.4s, #%2"                             \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12592,7 +12609,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        uint64x2_t b_ = (b);                                             \
        uint32x2_t a_ = (a);                                             \
        uint32x4_t result = vcombine_u32                                 \
-                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u32                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("uqshrn2 %0.4s, %1.2d, #%2"                             \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12606,7 +12624,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int16x8_t b_ = (b);                                              \
        uint8x8_t a_ = (a);                                              \
        uint8x16_t result = vcombine_u8                                  \
-                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+                             (a_, vcreate_u8                            \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("sqshrun2 %0.16b, %1.8h, #%2"                           \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12620,7 +12639,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int32x4_t b_ = (b);                                              \
        uint16x4_t a_ = (a);                                             \
        uint16x8_t result = vcombine_u16                                 \
-                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u16                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("sqshrun2 %0.8h, %1.4s, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -12634,7 +12654,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
        int64x2_t b_ = (b);                                              \
        uint32x2_t a_ = (a);                                             \
        uint32x4_t result = vcombine_u32                                 \
-                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u32                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("sqshrun2 %0.4s, %1.2d, #%2"                            \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13110,7 +13131,8 @@ vrev64q_u32 (uint32x4_t a)
        int16x8_t b_ = (b);                                              \
        int8x8_t a_ = (a);                                               \
        int8x16_t result = vcombine_s8                                   \
-                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+                            (a_, vcreate_s8                             \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("rshrn2 %0.16b,%1.8h,#%2"                               \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13124,7 +13146,8 @@ vrev64q_u32 (uint32x4_t a)
        int32x4_t b_ = (b);                                              \
        int16x4_t a_ = (a);                                              \
        int16x8_t result = vcombine_s16                                  \
-                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s16                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("rshrn2 %0.8h,%1.4s,#%2"                                \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13138,7 +13161,8 @@ vrev64q_u32 (uint32x4_t a)
        int64x2_t b_ = (b);                                              \
        int32x2_t a_ = (a);                                              \
        int32x4_t result = vcombine_s32                                  \
-                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s32                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("rshrn2 %0.4s,%1.2d,#%2"                                \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13152,7 +13176,8 @@ vrev64q_u32 (uint32x4_t a)
        uint16x8_t b_ = (b);                                             \
        uint8x8_t a_ = (a);                                              \
        uint8x16_t result = vcombine_u8                                  \
-                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+                             (a_, vcreate_u8                            \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("rshrn2 %0.16b,%1.8h,#%2"                               \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13166,7 +13191,8 @@ vrev64q_u32 (uint32x4_t a)
        uint32x4_t b_ = (b);                                             \
        uint16x4_t a_ = (a);                                             \
        uint16x8_t result = vcombine_u16                                 \
-                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u16                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("rshrn2 %0.8h,%1.4s,#%2"                                \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13180,7 +13206,8 @@ vrev64q_u32 (uint32x4_t a)
        uint64x2_t b_ = (b);                                             \
        uint32x2_t a_ = (a);                                             \
        uint32x4_t result = vcombine_u32                                 \
-                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u32                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("rshrn2 %0.4s,%1.2d,#%2"                                \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13417,7 +13444,7 @@ vrsrtsq_f64 (float64x2_t a, float64x2_t b)
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
 {
-  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -13428,7 +13455,7 @@ vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
 {
-  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -13439,7 +13466,7 @@ vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
 {
-  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -13450,7 +13477,7 @@ vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
 {
-  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -13461,7 +13488,7 @@ vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
 {
-  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -13472,7 +13499,7 @@ vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vrsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
 {
-  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -13864,7 +13891,8 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        int16x8_t b_ = (b);                                              \
        int8x8_t a_ = (a);                                               \
        int8x16_t result = vcombine_s8                                   \
-                            (a_, vcreate_s8 (UINT64_C (0x0)));          \
+                            (a_, vcreate_s8                             \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("shrn2 %0.16b,%1.8h,#%2"                                \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13878,7 +13906,8 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        int32x4_t b_ = (b);                                              \
        int16x4_t a_ = (a);                                              \
        int16x8_t result = vcombine_s16                                  \
-                            (a_, vcreate_s16 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s16                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("shrn2 %0.8h,%1.4s,#%2"                                 \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13892,7 +13921,8 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        int64x2_t b_ = (b);                                              \
        int32x2_t a_ = (a);                                              \
        int32x4_t result = vcombine_s32                                  \
-                            (a_, vcreate_s32 (UINT64_C (0x0)));         \
+                            (a_, vcreate_s32                            \
+                                   (__AARCH64_UINT64_C (0x0)));         \
        __asm__ ("shrn2 %0.4s,%1.2d,#%2"                                 \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13906,7 +13936,8 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        uint16x8_t b_ = (b);                                             \
        uint8x8_t a_ = (a);                                              \
        uint8x16_t result = vcombine_u8                                  \
-                             (a_, vcreate_u8 (UINT64_C (0x0)));         \
+                             (a_, vcreate_u8                            \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("shrn2 %0.16b,%1.8h,#%2"                                \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13920,7 +13951,8 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        uint32x4_t b_ = (b);                                             \
        uint16x4_t a_ = (a);                                             \
        uint16x8_t result = vcombine_u16                                 \
-                             (a_, vcreate_u16 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u16                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("shrn2 %0.8h,%1.4s,#%2"                                 \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -13934,7 +13966,8 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        uint64x2_t b_ = (b);                                             \
        uint32x2_t a_ = (a);                                             \
        uint32x4_t result = vcombine_u32                                 \
-                             (a_, vcreate_u32 (UINT64_C (0x0)));        \
+                             (a_, vcreate_u32                           \
+                                    (__AARCH64_UINT64_C (0x0)));        \
        __asm__ ("shrn2 %0.4s,%1.2d,#%2"                                 \
                 : "+w"(result)                                          \
                 : "w"(b_), "i"(c)                                       \
@@ -14386,7 +14419,7 @@ vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
 {
-  int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+  int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -14397,7 +14430,7 @@ vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
 {
-  int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+  int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -14408,7 +14441,7 @@ vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
 {
-  int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+  int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -14419,7 +14452,7 @@ vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
 {
-  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -14430,7 +14463,7 @@ vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
 {
-  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+  uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -14441,7 +14474,7 @@ vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
 {
-  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+  uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
            : "+w"(result)
            : "w"(b), "w"(c)
@@ -17039,7 +17072,7 @@ __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vtbl1_s8 (int8x8_t tab, int8x8_t idx)
 {
   int8x8_t result;
-  int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+  int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
            : "=w"(result)
            : "w"(temp), "w"(idx)
@@ -17051,7 +17084,7 @@ __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
 {
   uint8x8_t result;
-  uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
            : "=w"(result)
            : "w"(temp), "w"(idx)
@@ -17063,7 +17096,7 @@ __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
 vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
 {
   poly8x8_t result;
-  poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+  poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
            : "=w"(result)
            : "w"(temp), "w"(idx)
@@ -17113,7 +17146,7 @@ vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
   int8x8_t result;
   int8x16x2_t temp;
   temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
-  temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+  temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
            "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
            : "=w"(result)
@@ -17128,7 +17161,7 @@ vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
   uint8x8_t result;
   uint8x16x2_t temp;
   temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
-  temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+  temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
            "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
            : "=w"(result)
@@ -17143,7 +17176,7 @@ vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
   poly8x8_t result;
   poly8x16x2_t temp;
   temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
-  temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+  temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
            "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
            : "=w"(result)
@@ -17202,7 +17235,7 @@ vtbx1_s8 (int8x8_t r, int8x8_t tab, int8x8_t idx)
 {
   int8x8_t result;
   int8x8_t tmp1;
-  int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+  int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("movi %0.8b, 8\n\t"
            "cmhs %0.8b, %3.8b, %0.8b\n\t"
            "tbl %1.8b, {%2.16b}, %3.8b\n\t"
@@ -17218,7 +17251,7 @@ vtbx1_u8 (uint8x8_t r, uint8x8_t tab, uint8x8_t idx)
 {
   uint8x8_t result;
   uint8x8_t tmp1;
-  uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+  uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("movi %0.8b, 8\n\t"
            "cmhs %0.8b, %3.8b, %0.8b\n\t"
            "tbl %1.8b, {%2.16b}, %3.8b\n\t"
@@ -17234,7 +17267,7 @@ vtbx1_p8 (poly8x8_t r, poly8x8_t tab, uint8x8_t idx)
 {
   poly8x8_t result;
   poly8x8_t tmp1;
-  poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+  poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("movi %0.8b, 8\n\t"
            "cmhs %0.8b, %3.8b, %0.8b\n\t"
            "tbl %1.8b, {%2.16b}, %3.8b\n\t"
@@ -17288,7 +17321,7 @@ vtbx3_s8 (int8x8_t r, int8x8x3_t tab, int8x8_t idx)
   int8x8_t tmp1;
   int8x16x2_t temp;
   temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
-  temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+  temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
            "movi %0.8b, 24\n\t"
            "cmhs %0.8b, %3.8b, %0.8b\n\t"
@@ -17307,7 +17340,7 @@ vtbx3_u8 (uint8x8_t r, uint8x8x3_t tab, uint8x8_t idx)
   uint8x8_t tmp1;
   uint8x16x2_t temp;
   temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
-  temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+  temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
            "movi %0.8b, 24\n\t"
            "cmhs %0.8b, %3.8b, %0.8b\n\t"
@@ -17326,7 +17359,7 @@ vtbx3_p8 (poly8x8_t r, poly8x8x3_t tab, uint8x8_t idx)
   poly8x8_t tmp1;
   poly8x16x2_t temp;
   temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
-  temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+  temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (__AARCH64_UINT64_C (0x0)));
   __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
            "movi %0.8b, 24\n\t"
            "cmhs %0.8b, %3.8b, %0.8b\n\t"
@@ -21448,7 +21481,7 @@ vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
 {
-  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
+  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (__AARCH64_INT64_C (0)));
   return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __tmp, __d);
 }
 
@@ -21499,7 +21532,7 @@ vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
 {
-  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
+  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (__AARCH64_INT64_C (0)));
   return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __tmp, __d);
 }
 
@@ -21576,7 +21609,7 @@ vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
 {
-  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
+  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (__AARCH64_INT64_C (0)));
   return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __tmp, __d);
 }
 
@@ -21627,7 +21660,7 @@ vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
 {
-  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
+  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (__AARCH64_INT64_C (0)));
   return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __tmp, __d);
 }
 
@@ -21752,7 +21785,7 @@ vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
 {
-  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
+  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (__AARCH64_INT64_C (0)));
   return __builtin_aarch64_sqdmull_lanev4hi (__a, __tmp, __c);
 }
 
@@ -21801,7 +21834,7 @@ vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
 {
-  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
+  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (__AARCH64_INT64_C (0)));
   return __builtin_aarch64_sqdmull_lanev2si (__a, __tmp, __c);
 }
 
@@ -24388,8 +24421,8 @@ vst2_s64 (int64_t * __a, int64x1x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   int64x2x2_t temp;
-  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
-  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
   __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
@@ -24400,8 +24433,8 @@ vst2_u64 (uint64_t * __a, uint64x1x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   uint64x2x2_t temp;
-  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
-  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
   __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
@@ -24412,8 +24445,8 @@ vst2_f64 (float64_t * __a, float64x1x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   float64x2x2_t temp;
-  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
-  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1);
   __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
@@ -24424,8 +24457,8 @@ vst2_s8 (int8_t * __a, int8x8x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   int8x16x2_t temp;
-  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
-  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
   __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
@@ -24436,8 +24469,8 @@ vst2_p8 (poly8_t * __a, poly8x8x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   poly8x16x2_t temp;
-  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
-  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
   __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
@@ -24448,8 +24481,8 @@ vst2_s16 (int16_t * __a, int16x4x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   int16x8x2_t temp;
-  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
-  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
   __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
@@ -24460,8 +24493,8 @@ vst2_p16 (poly16_t * __a, poly16x4x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   poly16x8x2_t temp;
-  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
-  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
   __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
@@ -24472,8 +24505,8 @@ vst2_s32 (int32_t * __a, int32x2x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   int32x4x2_t temp;
-  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
-  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
   __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
@@ -24484,8 +24517,8 @@ vst2_u8 (uint8_t * __a, uint8x8x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   uint8x16x2_t temp;
-  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
-  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
   __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
@@ -24496,8 +24529,8 @@ vst2_u16 (uint16_t * __a, uint16x4x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   uint16x8x2_t temp;
-  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
-  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
   __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
@@ -24508,8 +24541,8 @@ vst2_u32 (uint32_t * __a, uint32x2x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   uint32x4x2_t temp;
-  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
-  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
   __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
@@ -24520,8 +24553,8 @@ vst2_f32 (float32_t * __a, float32x2x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
   float32x4x2_t temp;
-  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
-  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1);
   __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
@@ -24640,9 +24673,9 @@ vst3_s64 (int64_t * __a, int64x1x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   int64x2x3_t temp;
-  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
-  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
-  temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
+  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
@@ -24654,9 +24687,9 @@ vst3_u64 (uint64_t * __a, uint64x1x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   uint64x2x3_t temp;
-  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
-  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
-  temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
+  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
@@ -24668,9 +24701,9 @@ vst3_f64 (float64_t * __a, float64x1x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   float64x2x3_t temp;
-  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
-  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
-  temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
+  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2);
@@ -24682,9 +24715,9 @@ vst3_s8 (int8_t * __a, int8x8x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   int8x16x3_t temp;
-  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
-  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
-  temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
+  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
@@ -24696,9 +24729,9 @@ vst3_p8 (poly8_t * __a, poly8x8x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   poly8x16x3_t temp;
-  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
-  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
-  temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
+  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
@@ -24710,9 +24743,9 @@ vst3_s16 (int16_t * __a, int16x4x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   int16x8x3_t temp;
-  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
-  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
-  temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
+  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
@@ -24724,9 +24757,9 @@ vst3_p16 (poly16_t * __a, poly16x4x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   poly16x8x3_t temp;
-  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
-  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
-  temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
+  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
@@ -24738,9 +24771,9 @@ vst3_s32 (int32_t * __a, int32x2x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   int32x4x3_t temp;
-  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
-  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
-  temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
+  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
@@ -24752,9 +24785,9 @@ vst3_u8 (uint8_t * __a, uint8x8x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   uint8x16x3_t temp;
-  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
-  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
-  temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
+  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
@@ -24766,9 +24799,9 @@ vst3_u16 (uint16_t * __a, uint16x4x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   uint16x8x3_t temp;
-  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
-  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
-  temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
+  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
@@ -24780,9 +24813,9 @@ vst3_u32 (uint32_t * __a, uint32x2x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   uint32x4x3_t temp;
-  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
-  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
-  temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
+  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
@@ -24794,9 +24827,9 @@ vst3_f32 (float32_t * __a, float32x2x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
   float32x4x3_t temp;
-  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
-  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
-  temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
+  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2);
@@ -24928,10 +24961,10 @@ vst4_s64 (int64_t * __a, int64x1x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   int64x2x4_t temp;
-  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
-  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
-  temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
-  temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (INT64_C (0)));
+  temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
+  temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
@@ -24944,10 +24977,10 @@ vst4_u64 (uint64_t * __a, uint64x1x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   uint64x2x4_t temp;
-  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
-  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
-  temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
-  temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (UINT64_C (0)));
+  temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
@@ -24960,10 +24993,10 @@ vst4_f64 (float64_t * __a, float64x1x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   float64x2x4_t temp;
-  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
-  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
-  temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
-  temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (UINT64_C (0)));
+  temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2);
@@ -24976,10 +25009,10 @@ vst4_s8 (int8_t * __a, int8x8x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   int8x16x4_t temp;
-  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
-  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
-  temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
-  temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (INT64_C (0)));
+  temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
+  temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
@@ -24992,10 +25025,10 @@ vst4_p8 (poly8_t * __a, poly8x8x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   poly8x16x4_t temp;
-  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
-  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
-  temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
-  temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (UINT64_C (0)));
+  temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
@@ -25008,10 +25041,10 @@ vst4_s16 (int16_t * __a, int16x4x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   int16x8x4_t temp;
-  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
-  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
-  temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
-  temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (INT64_C (0)));
+  temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
+  temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
@@ -25024,10 +25057,10 @@ vst4_p16 (poly16_t * __a, poly16x4x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   poly16x8x4_t temp;
-  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
-  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
-  temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
-  temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (UINT64_C (0)));
+  temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
@@ -25040,10 +25073,10 @@ vst4_s32 (int32_t * __a, int32x2x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   int32x4x4_t temp;
-  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
-  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
-  temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
-  temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (INT64_C (0)));
+  temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+  temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+  temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
+  temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (__AARCH64_INT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
@@ -25056,10 +25089,10 @@ vst4_u8 (uint8_t * __a, uint8x8x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   uint8x16x4_t temp;
-  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
-  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
-  temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
-  temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (UINT64_C (0)));
+  temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
@@ -25072,10 +25105,10 @@ vst4_u16 (uint16_t * __a, uint16x4x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   uint16x8x4_t temp;
-  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
-  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
-  temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
-  temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (UINT64_C (0)));
+  temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
@@ -25088,10 +25121,10 @@ vst4_u32 (uint32_t * __a, uint32x2x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   uint32x4x4_t temp;
-  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
-  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
-  temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
-  temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (UINT64_C (0)));
+  temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
@@ -25104,10 +25137,10 @@ vst4_f32 (float32_t * __a, float32x2x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
   float32x4x4_t temp;
-  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
-  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
-  temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
-  temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (UINT64_C (0)));
+  temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (__AARCH64_UINT64_C (0)));
   __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0);
   __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1);
   __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2);