@@ -5853,6 +5853,50 @@ aarch64_preferred_simd_mode (enum machine_mode mode)
return word_mode;
}
+/* A table to help perform AArch64-specific name mangling for AdvSIMD
+ vector types in order to conform to the AAPCS64 (see "Procedure
+ Call Standard for the ARM 64-bit Architecture", Appendix A). To
+ qualify for emission with the mangled names defined in that document,
+ a vector type must not only be of the correct mode but also be
+ composed of AdvSIMD vector element types (e.g.
+   __builtin_aarch64_simd_qi); these types are registered by
+ aarch64_init_simd_builtins (). In other words, vector types defined
+   in other ways, e.g. via the vector_size attribute, will get default
+ mangled names. */
+typedef struct
+{
+ enum machine_mode mode;
+ const char *element_type_name;
+ const char *mangled_name;
+} aarch64_simd_mangle_map_entry;
+
+static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
+ /* 64-bit containerized types. */
+ { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
+ { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
+ { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
+ { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
+ { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
+ { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
+ { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
+ { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
+ { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
+ /* 128-bit containerized types. */
+ { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
+ { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
+ { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
+ { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
+ { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
+ { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
+ { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
+ { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
+ { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
+ { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
+ { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
+ { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
+ { VOIDmode, NULL, NULL }
+};
+
/* Implement TARGET_MANGLE_TYPE. */
const char *
@@ -5863,6 +5907,26 @@ aarch64_mangle_type (const_tree type)
if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
return "St9__va_list";
+ /* Check the mode of the vector type, and the name of the vector
+ element type, against the table. */
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
+
+ while (pos->mode != VOIDmode)
+ {
+ tree elt_type = TREE_TYPE (type);
+
+ if (pos->mode == TYPE_MODE (type)
+ && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
+ pos->element_type_name))
+ return pos->mangled_name;
+
+ pos++;
+ }
+ }
+
/* Use the default mangling. */
return NULL;
}
new file mode 100644
@@ -0,0 +1,55 @@
+// Test that AArch64 AdvSIMD (NEON) vector types have their names mangled
+// correctly.
+
+// { dg-do compile { target { aarch64*-*-* } } }
+
+#include <arm_neon.h>
+
+void f0 (int8x8_t a) {}
+void f1 (int16x4_t a) {}
+void f2 (int32x2_t a) {}
+void f3 (uint8x8_t a) {}
+void f4 (uint16x4_t a) {}
+void f5 (uint32x2_t a) {}
+void f6 (float32x2_t a) {}
+void f7 (poly8x8_t a) {}
+void f8 (poly16x4_t a) {}
+
+void f9 (int8x16_t a) {}
+void f10 (int16x8_t a) {}
+void f11 (int32x4_t a) {}
+void f12 (int64x2_t a) {}
+void f13 (uint8x16_t a) {}
+void f14 (uint16x8_t a) {}
+void f15 (uint32x4_t a) {}
+void f16 (uint64x2_t a) {}
+void f17 (float32x4_t a) {}
+void f18 (float64x2_t a) {}
+void f19 (poly8x16_t a) {}
+void f20 (poly16x8_t a) {}
+
+void f21 (int8x16_t, int8x16_t) {}
+
+
+// { dg-final { scan-assembler "_Z2f010__Int8x8_t:" } }
+// { dg-final { scan-assembler "_Z2f111__Int16x4_t:" } }
+// { dg-final { scan-assembler "_Z2f211__Int32x2_t:" } }
+// { dg-final { scan-assembler "_Z2f311__Uint8x8_t:" } }
+// { dg-final { scan-assembler "_Z2f412__Uint16x4_t:" } }
+// { dg-final { scan-assembler "_Z2f512__Uint32x2_t:" } }
+// { dg-final { scan-assembler "_Z2f613__Float32x2_t:" } }
+// { dg-final { scan-assembler "_Z2f711__Poly8x8_t:" } }
+// { dg-final { scan-assembler "_Z2f812__Poly16x4_t:" } }
+// { dg-final { scan-assembler "_Z2f911__Int8x16_t:" } }
+// { dg-final { scan-assembler "_Z3f1011__Int16x8_t:" } }
+// { dg-final { scan-assembler "_Z3f1111__Int32x4_t:" } }
+// { dg-final { scan-assembler "_Z3f1211__Int64x2_t:" } }
+// { dg-final { scan-assembler "_Z3f1312__Uint8x16_t:" } }
+// { dg-final { scan-assembler "_Z3f1412__Uint16x8_t:" } }
+// { dg-final { scan-assembler "_Z3f1512__Uint32x4_t:" } }
+// { dg-final { scan-assembler "_Z3f1612__Uint64x2_t:" } }
+// { dg-final { scan-assembler "_Z3f1713__Float32x4_t:" } }
+// { dg-final { scan-assembler "_Z3f1813__Float64x2_t:" } }
+// { dg-final { scan-assembler "_Z3f1912__Poly8x16_t:" } }
+// { dg-final { scan-assembler "_Z3f2012__Poly16x8_t:" } }
+// { dg-final { scan-assembler "_Z3f2111__Int8x16_tS_:" } }