@@ -18741,6 +18741,34 @@ vcmlaq_rot270_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
#pragma GCC pop_options
#endif
+/* AdvSIMD 8-bit Integer Matrix Multiply (I8MM) intrinsics. */
+
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+i8mm")
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmmlaq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_neon_smmlav16qi (__r, __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmmlaq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_neon_ummlav16qi_uuuu (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusmmlaq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_neon_usmmlav16qi_ssus (__r, __a, __b);
+}
+
+#pragma GCC pop_options
+
#ifdef __cplusplus
}
#endif
@@ -373,3 +373,7 @@ VAR2 (MAC_LANE_PAIR, vcmlaq_lane0, v4sf, v8hf)
VAR2 (MAC_LANE_PAIR, vcmlaq_lane90, v4sf, v8hf)
VAR2 (MAC_LANE_PAIR, vcmlaq_lane180, v4sf, v8hf)
VAR2 (MAC_LANE_PAIR, vcmlaq_lane270, v4sf, v8hf)
+
+VAR1 (TERNOP, smmla, v16qi)
+VAR1 (UTERNOP, ummla, v16qi)
+VAR1 (USTERNOP, usmmla, v16qi)
@@ -471,6 +471,8 @@
(define_int_iterator VCADD [UNSPEC_VCADD90 UNSPEC_VCADD270])
(define_int_iterator VCMLA [UNSPEC_VCMLA UNSPEC_VCMLA90 UNSPEC_VCMLA180 UNSPEC_VCMLA270])
+(define_int_iterator MATMUL [UNSPEC_MATMUL_S UNSPEC_MATMUL_U UNSPEC_MATMUL_US])
+
;;----------------------------------------------------------------------------
;; Mode attributes
;;----------------------------------------------------------------------------
@@ -883,6 +885,7 @@
(UNSPEC_VMLSL_S_LANE "s") (UNSPEC_VMLSL_U_LANE "u")
(UNSPEC_VMULL_S "s") (UNSPEC_VMULL_U "u") (UNSPEC_VMULL_P "p")
(UNSPEC_VMULL_S_LANE "s") (UNSPEC_VMULL_U_LANE "u")
+ (UNSPEC_MATMUL_S "s") (UNSPEC_MATMUL_U "u") (UNSPEC_MATMUL_US "us")
(UNSPEC_VSUBL_S "s") (UNSPEC_VSUBL_U "u")
(UNSPEC_VSUBW_S "s") (UNSPEC_VSUBW_U "u")
(UNSPEC_VHSUB_S "s") (UNSPEC_VHSUB_U "u")
@@ -1089,6 +1092,9 @@
(UNSPEC_SMUADX "smuadx") (UNSPEC_SSAT16 "ssat16")
(UNSPEC_USAT16 "usat16")])
+(define_int_attr mmla_sfx [(UNSPEC_MATMUL_S "s8") (UNSPEC_MATMUL_U "u8")
+ (UNSPEC_MATMUL_US "s8")])
+
;; Both kinds of return insn.
(define_code_iterator RETURNS [return simple_return])
(define_code_attr return_str [(return "") (simple_return "simple_")])
@@ -6552,3 +6552,14 @@ if (BYTES_BIG_ENDIAN)
"vabd.<V_if_elem> %<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_fp_abd_s<q>")]
)
+
+(define_insn "neon_<sup>mmlav16qi"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (plus:V4SI (match_operand:V4SI 1 "register_operand" "0")
+ (unspec:V4SI [(match_operand:V16QI 2 "register_operand" "w")
+ (match_operand:V16QI 3 "register_operand" "w")]
+ MATMUL)))]
+ "TARGET_I8MM"
+ "v<sup>mmla.<mmla_sfx>\t%q0, %q2, %q3"
+ [(set_attr "type" "neon_mla_s_q")]
+)
@@ -493,4 +493,7 @@
UNSPEC_VCMLA90
UNSPEC_VCMLA180
UNSPEC_VCMLA270
+ UNSPEC_MATMUL_S
+ UNSPEC_MATMUL_U
+ UNSPEC_MATMUL_US
])
new file mode 100644
@@ -0,0 +1,37 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_2a_i8mm_ok } */
+/* { dg-options "-save-temps -O2" } */
+/* { dg-add-options arm_v8_2a_i8mm } */
+
+#include "arm_neon.h"
+
+extern void abort(void);
+
+#define VAR4(v) {v, v, v, v}
+#define VAR16(v) {v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v}
+#define TEST(t, f, r, a, b, ...) { \
+ t##32x4_t f##_ref = { __VA_ARGS__ }; \
+ t##32x4_t f##_out = f (r, a, b); \
+ for (int i = 0; i < 4; i++) \
+ if (f##_out[i] != f##_ref[i]) \
+ abort(); \
+}
+
+int
+main()
+{
+ int32x4_t s32 = VAR4(-1);
+ uint32x4_t u32 = VAR4(1);
+ int8x16_t s8 = VAR16(-1);
+ uint8x16_t u8 = VAR16(1);
+
+ TEST(int, vmmlaq_s32, s32, s8, s8, 7, 7, 7, 7);
+ TEST(uint, vmmlaq_u32, u32, u8, u8, 9, 9, 9, 9);
+ TEST(int, vusmmlaq_s32, s32, u8, s8, -9, -9, -9, -9);
+
+ return 0;
+}
+
+/* { dg-final { scan-assembler {vsmmla.s8\tq[0-9]+, q[0-9]+, q[0-9]+} } } */
+/* { dg-final { scan-assembler {vummla.u8\tq[0-9]+, q[0-9]+, q[0-9]+} } } */
+/* { dg-final { scan-assembler {vusmmla.s8\tq[0-9]+, q[0-9]+, q[0-9]+} } } */