@@ -6495,6 +6495,480 @@ _mm512_maskz_fmul_round_pch (__mmask16 __A, __m512h __B,
#endif /* __OPTIMIZE__ */
+/* Intrinsics vf[,c]maddcsh. */
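+/* Each complex FP16 value occupies a pair of adjacent _Float16 elements,
+   real part in the lower element; the fcmadd forms multiply by the
+   complex conjugate of one of the source operands.  */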
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) __A, __B);
+#else
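+  /* Without AVX512VL there is no 128-bit masked move, so emulate the
+     merge-masking of the low element with BLENDVPS: bit 0 of the mask is
+     shifted into the sign bit of the element-0 blend selector.  */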
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B, __D,
+ _MM_FROUND_CUR_DIRECTION));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_v8hf_maskz_round((__v8hf) __D,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmaddcsh_v8hf_round((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C, __B,
+ _MM_FROUND_CUR_DIRECTION),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B, __D,
+ _MM_FROUND_CUR_DIRECTION));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_v8hf_maskz_round((__v8hf) __D,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_sch (__m128h __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfmaddcsh_v8hf_round((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C,
+ __B, __E),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C,
+ __B, __E),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+ __mmask8 __D, const int __E)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ __D, __E));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)__builtin_ia32_vfcmaddcsh_v8hf_maskz_round((__v8hf) __D,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+ return (__m128h)__builtin_ia32_vfcmaddcsh_v8hf_round((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+#ifdef __AVX512VL__
+ return (__m128h) __builtin_ia32_movaps128_mask (
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C,
+ __B, __E),
+ (__v4sf) __A, __B);
+#else
+ return (__m128h) __builtin_ia32_blendvps ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) __D,
+ (__v8hf) __A,
+ (__v8hf) __C,
+ __B, __E),
+ (__v4sf) _mm_set_ss ((float) ((int) __B << 31)));
+#endif
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C,
+ __mmask8 __D, const int __E)
+{
+ return (__m128h) _mm_move_ss ((__m128) __C,
+ (__m128)
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ __D, __E));
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)__builtin_ia32_vfmaddcsh_v8hf_maskz_round((__v8hf) __D,
+ (__v8hf) __B,
+ (__v8hf) __C,
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D)
+{
+ return (__m128h)__builtin_ia32_vfmaddcsh_v8hf_round((__v8hf) __C,
+ (__v8hf) __A,
+ (__v8hf) __B,
+ __D);
+}
+
+#else
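+/* Without __OPTIMIZE__ the inline wrappers cannot guarantee a constant
+   rounding argument, so fall back to macro definitions.  */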
+#ifdef __AVX512VL__
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_movaps128_mask ( \
+ (__v4sf) \
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) (D), \
+ (__v8hf) (A), \
+ (__v8hf) (C), \
+ (B), (E)), \
+ (__v4sf) (A), (B)))
+
+#else
+#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_blendvps ((__v4sf) (A), \
+ (__v4sf) \
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) (D), \
+ (__v8hf) (A), \
+ (__v8hf) (C), \
+ (B), (E)), \
+ (__v4sf) _mm_set_ss ((float) ((int) (B) << 31))))
+#endif
+
+#define _mm_mask3_fcmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) _mm_move_ss ((__m128) (C), \
+ (__m128) \
+ __builtin_ia32_vfcmaddcsh_v8hf_mask_round ((__v8hf) (C), \
+ (__v8hf) (A), \
+ (__v8hf) (B), \
+ (D), (E))))
+
+#define _mm_maskz_fcmadd_round_sch(A, B, C, D, E) \
+ __builtin_ia32_vfcmaddcsh_v8hf_maskz_round ((D), (B), (C), (A), (E))
+
+#define _mm_fcmadd_round_sch(A, B, C, D) \
+ __builtin_ia32_vfcmaddcsh_v8hf_round ((C), (A), (B), (D))
+
+#ifdef __AVX512VL__
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_movaps128_mask ( \
+ (__v4sf) \
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) (D), \
+ (__v8hf) (A), \
+ (__v8hf) (C), \
+ (B), (E)), \
+ (__v4sf) (A), (B)))
+
+#else
+#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) __builtin_ia32_blendvps ((__v4sf) (A), \
+ (__v4sf) \
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) (D), \
+ (__v8hf) (A), \
+ (__v8hf) (C), \
+ (B), (E)), \
+ (__v4sf) _mm_set_ss ((float) ((int) (B) << 31))))
+#endif
+
+#define _mm_mask3_fmadd_round_sch(A, B, C, D, E) \
+ ((__m128h) _mm_move_ss ((__m128) (C), \
+ (__m128) \
+ __builtin_ia32_vfmaddcsh_v8hf_mask_round ((__v8hf) (C), \
+ (__v8hf) (A), \
+ (__v8hf) (B), \
+ (D), (E))))
+
+#define _mm_maskz_fmadd_round_sch(A, B, C, D, E) \
+ __builtin_ia32_vfmaddcsh_v8hf_maskz_round ((D), (B), (C), (A), (E))
+
+#define _mm_fmadd_round_sch(A, B, C, D) \
+ __builtin_ia32_vfmaddcsh_v8hf_round ((C), (A), (B), (D))
+
+#endif /* __OPTIMIZE__ */
+
+/* Intrinsics vf[,c]mulcsh. */
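+/* Scalar complex multiply of the lowest complex FP16 value; the fcmul
+   forms multiply by the complex conjugate of one of the source operands.  */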
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_sch (__m128h __A, __m128h __B)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_v8hf_round((__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_v8hf_mask_round((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfcmulcsh_v8hf_mask_round((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_sch (__m128h __A, __m128h __B)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_v8hf_round((__v8hf) __A,
+ (__v8hf) __B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_v8hf_mask_round((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_sch (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return (__m128h)
+ __builtin_ia32_vfmulcsh_v8hf_mask_round((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fcmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+ return (__m128h)__builtin_ia32_vfcmulcsh_v8hf_round((__v8hf) __A,
+ (__v8hf) __B,
+ __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fcmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)__builtin_ia32_vfcmulcsh_v8hf_mask_round((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fcmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __E)
+{
+ return (__m128h)__builtin_ia32_vfcmulcsh_v8hf_mask_round((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fmul_round_sch (__m128h __A, __m128h __B, const int __D)
+{
+ return (__m128h)__builtin_ia32_vfmulcsh_v8hf_round((__v8hf) __A,
+ (__v8hf) __B, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return (__m128h)__builtin_ia32_vfmulcsh_v8hf_mask_round((__v8hf) __C,
+ (__v8hf) __D,
+ (__v8hf) __A,
+ __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, const int __E)
+{
+ return (__m128h)__builtin_ia32_vfmulcsh_v8hf_mask_round((__v8hf) __B,
+ (__v8hf) __C,
+ _mm_setzero_ph (),
+ __A, __E);
+}
+
+#else
+#define _mm_fcmul_round_sch(__A, __B, __D) \
+ (__m128h)__builtin_ia32_vfcmulcsh_v8hf_round((__v8hf) __A,(__v8hf) __B, __D)
+
+#define _mm_mask_fcmul_round_sch(__A, __B, __C, __D, __E) \
+ (__m128h)__builtin_ia32_vfcmulcsh_v8hf_mask_round((__v8hf) __C, \
+ (__v8hf) __D, \
+ (__v8hf) __A, \
+ __B, __E)
+
+#define _mm_maskz_fcmul_round_sch(__A, __B, __C, __E) \
+ (__m128h)__builtin_ia32_vfcmulcsh_v8hf_mask_round((__v8hf) __B, \
+ (__v8hf) __C, \
+ _mm_setzero_ph(), \
+ __A, __E)
+
+#define _mm_fmul_round_sch(__A, __B, __D) \
+ (__m128h)__builtin_ia32_vfmulcsh_v8hf_round((__v8hf) __A,(__v8hf) __B, __D)
+
+#define _mm_mask_fmul_round_sch(__A, __B, __C, __D, __E) \
+ (__m128h)__builtin_ia32_vfmulcsh_v8hf_mask_round((__v8hf) __C, \
+ (__v8hf) __D, \
+ (__v8hf) __A, \
+ __B, __E)
+
+#define _mm_maskz_fmul_round_sch(__A, __B, __C, __E) \
+ (__m128h)__builtin_ia32_vfmulcsh_v8hf_mask_round((__v8hf) __B, \
+ (__v8hf) __C, \
+ _mm_setzero_ph (), \
+ __A, __E)
+
+#endif /* __OPTIMIZE__ */
+
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
@@ -3231,6 +3231,16 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_round, "__
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fcmulc_v32hf_mask_round, "__builtin_ia32_vfcmulcph_v32hf_mask_round", IX86_BUILTIN_VFCMULCPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_round, "__builtin_ia32_vfmulcph_v32hf_round", IX86_BUILTIN_VFMULCPH_V32HF_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmulc_v32hf_mask_round, "__builtin_ia32_vfmulcph_v32hf_mask_round", IX86_BUILTIN_VFMULCPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fcmaddcsh_v8hf_round, "__builtin_ia32_vfcmaddcsh_v8hf_round", IX86_BUILTIN_VFCMADDCSH_V8HF_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_mask_round, "__builtin_ia32_vfcmaddcsh_v8hf_mask_round", IX86_BUILTIN_VFCMADDCSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmaddcsh_v8hf_maskz_round, "__builtin_ia32_vfcmaddcsh_v8hf_maskz_round", IX86_BUILTIN_VFCMADDCSH_V8HF_MASKZ_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fma_fmaddcsh_v8hf_round, "__builtin_ia32_vfmaddcsh_v8hf_round", IX86_BUILTIN_VFMADDCSH_V8HF_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmaddcsh_v8hf_mask_round, "__builtin_ia32_vfmaddcsh_v8hf_mask_round", IX86_BUILTIN_VFMADDCSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmaddcsh_v8hf_maskz_round, "__builtin_ia32_vfmaddcsh_v8hf_maskz_round", IX86_BUILTIN_VFMADDCSH_V8HF_MASKZ_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_round, "__builtin_ia32_vfcmulcsh_v8hf_round", IX86_BUILTIN_VFCMULCSH_V8HF_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_mask_round, "__builtin_ia32_vfcmulcsh_v8hf_mask_round", IX86_BUILTIN_VFCMULCSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "__builtin_ia32_vfmulcsh_v8hf_round", IX86_BUILTIN_VFMULCSH_V8HF_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_v8hf_mask_round", IX86_BUILTIN_VFMULCSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
@@ -5597,6 +5597,85 @@ (define_insn "<avx512>_<complexopname>_<mode><maskc_name><round_name>"
[(set_attr "type" "ssemul")
(set_attr "mode" "<MODE>")])
+(define_expand "avx512fp16_fmaddcsh_v8hf_maskz<round_expand_name>"
+ [(match_operand:V8HF 0 "register_operand")
+ (match_operand:V8HF 1 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
+ (match_operand:QI 4 "register_operand")]
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+{
+ emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz<round_expand_name> (
+ operands[0], operands[1], operands[2], operands[3],
+ CONST0_RTX (V8HFmode), operands[4]<round_expand_operand>));
+ DONE;
+})
+
+(define_expand "avx512fp16_fcmaddcsh_v8hf_maskz<round_expand_name>"
+ [(match_operand:V8HF 0 "register_operand")
+ (match_operand:V8HF 1 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 2 "<round_expand_nimm_predicate>")
+ (match_operand:V8HF 3 "<round_expand_nimm_predicate>")
+ (match_operand:QI 4 "register_operand")]
+ "TARGET_AVX512FP16 && <round_mode512bit_condition>"
+{
+ emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz<round_expand_name> (
+ operands[0], operands[1], operands[2], operands[3],
+ CONST0_RTX (V8HFmode), operands[4]<round_expand_operand>));
+ DONE;
+})
+
+(define_insn "avx512fp16_fma_<complexopname>sh_v8hf<mask_scalarcz_name><round_scalarcz_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=v")
+ (vec_merge:V8HF
+ (unspec:V8HF
+ [(match_operand:V8HF 1 "<round_scalarcz_nimm_predicate>" "0")
+ (match_operand:V8HF 2 "<round_scalarcz_nimm_predicate>" "v")
+ (match_operand:V8HF 3 "<round_scalarcz_nimm_predicate>" "<round_scalarcz_constraint>")]
+ UNSPEC_COMPLEX_F_C_MA)
+ (match_dup 2)
+ (const_int 3)))]
+ "TARGET_AVX512FP16"
+ "v<complexopname>sh\t{<round_scalarcz_mask_op4>%3, %2, %0<mask_scalarcz_operand4>|%0<mask_scalarcz_operand4>, %2, %3<round_scalarcz_maskcz_mask_op4>}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8HF")])
+
+(define_insn "avx512fp16_<complexopname>sh_v8hf_mask<round_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=v")
+ (vec_merge:V8HF
+ (vec_merge:V8HF
+ (unspec:V8HF
+ [(match_operand:V8HF 1 "<round_nimm_predicate>" "0")
+ (match_operand:V8HF 2 "<round_nimm_predicate>" "v")
+ (match_operand:V8HF 3 "<round_nimm_predicate>" "<round_constraint>")]
+ UNSPEC_COMPLEX_F_C_MA)
+ (match_dup 1)
+ (unspec:QI [(match_operand:QI 4 "register_operand" "Yk")]
+ UNSPEC_COMPLEX_MASK))
+ (match_dup 2)
+ (const_int 3)))]
+ "TARGET_AVX512FP16"
+ "v<complexopname>sh\t{<round_op5>%3, %2, %0%{%4%}|%0%{%4%}, %2, %3<round_op5>}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8HF")])
+
+(define_insn "avx512fp16_<complexopname>sh_v8hf<mask_scalarc_name><round_scalarcz_name>"
+ [(set (match_operand:V8HF 0 "register_operand" "=v")
+ (vec_merge:V8HF
+ (unspec:V8HF
+ [(match_operand:V8HF 1 "nonimmediate_operand" "v")
+ (match_operand:V8HF 2 "<round_scalarcz_nimm_predicate>" "<round_scalarcz_constraint>")]
+ UNSPEC_COMPLEX_F_C_MUL)
+ (match_dup 1)
+ (const_int 3)))]
+ "TARGET_AVX512FP16"
+ "v<complexopname>sh\t{<round_scalarc_mask_op3>%2, %1, %0<mask_scalarc_operand3>|%0<mask_scalarc_operand3>, %1, %2<round_scalarc_mask_op3>}"
+ [(set_attr "type" "ssemul")
+ (set_attr "mode" "V8HF")])
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel half-precision floating point conversion operations
@@ -308,8 +308,12 @@ (define_subst "mask_expand4"
(match_operand:<avx512fmaskmode> 5 "register_operand")])
(define_subst_attr "mask_scalar_name" "mask_scalar" "" "_mask")
+(define_subst_attr "mask_scalarcz_name" "mask_scalarcz" "" "_maskz")
+(define_subst_attr "mask_scalarc_name" "mask_scalarc" "" "_mask")
+(define_subst_attr "mask_scalarc_operand3" "mask_scalarc" "" "%{%4%}%N3")
(define_subst_attr "mask_scalar_operand3" "mask_scalar" "" "%{%4%}%N3")
(define_subst_attr "mask_scalar_operand4" "mask_scalar" "" "%{%5%}%N4")
+(define_subst_attr "mask_scalarcz_operand4" "mask_scalarcz" "" "%{%5%}%N4")
(define_subst "mask_scalar"
[(set (match_operand:SUBST_V 0)
@@ -327,12 +331,55 @@ (define_subst "mask_scalar"
(match_dup 2)
(const_int 1)))])
+(define_subst "mask_scalarcz"
+ [(set (match_operand:SUBST_CV 0)
+ (vec_merge:SUBST_CV
+ (match_operand:SUBST_CV 1)
+ (match_operand:SUBST_CV 2)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ [(set (match_dup 0)
+ (vec_merge:SUBST_CV
+ (vec_merge:SUBST_CV
+ (match_dup 1)
+ (match_operand:SUBST_CV 3 "const0_operand" "C")
+ (unspec:<avx512fmaskmode>
+ [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
+ UNSPEC_COMPLEX_MASK))
+ (match_dup 2)
+ (const_int 3)))])
+
+(define_subst "mask_scalarc"
+ [(set (match_operand:SUBST_CV 0)
+ (vec_merge:SUBST_CV
+ (match_operand:SUBST_CV 1)
+ (match_operand:SUBST_CV 2)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ [(set (match_dup 0)
+ (vec_merge:SUBST_CV
+ (vec_merge:SUBST_CV
+ (match_dup 1)
+ (match_operand:SUBST_CV 3 "nonimm_or_0_operand" "0C")
+ (unspec:<avx512fmaskmode>
+ [(match_operand:<avx512fmaskcmode> 4 "register_operand" "Yk")]
+ UNSPEC_COMPLEX_MASK))
+ (match_dup 2)
+ (const_int 3)))])
+
(define_subst_attr "round_scalar_name" "round_scalar" "" "_round")
+(define_subst_attr "round_scalarcz_name" "round_scalarcz" "" "_round")
(define_subst_attr "round_scalar_mask_operand3" "mask_scalar" "%R3" "%R5")
+(define_subst_attr "round_scalarc_mask_operand3" "mask_scalarc" "%R3" "%R5")
+(define_subst_attr "round_scalarcz_mask_operand4" "mask_scalarcz" "%R4" "%R6")
(define_subst_attr "round_scalar_mask_op3" "round_scalar" "" "<round_scalar_mask_operand3>")
+(define_subst_attr "round_scalarc_mask_op3" "round_scalarcz" "" "<round_scalarc_mask_operand3>")
+(define_subst_attr "round_scalarcz_mask_op4" "round_scalarcz" "" "<round_scalarcz_mask_operand4>")
(define_subst_attr "round_scalar_constraint" "round_scalar" "vm" "v")
+(define_subst_attr "round_scalarcz_constraint" "round_scalarcz" "vm" "v")
(define_subst_attr "round_scalar_prefix" "round_scalar" "vex" "evex")
(define_subst_attr "round_scalar_nimm_predicate" "round_scalar" "nonimmediate_operand" "register_operand")
+(define_subst_attr "round_scalarcz_nimm_predicate" "round_scalarcz" "vector_operand" "register_operand")
(define_subst "round_scalar"
[(set (match_operand:SUBST_V 0)
@@ -350,6 +397,22 @@ (define_subst "round_scalar"
(match_operand:SI 3 "const_4_or_8_to_11_operand")]
UNSPEC_EMBEDDED_ROUNDING))])
+(define_subst "round_scalarcz"
+ [(set (match_operand:SUBST_V 0)
+ (vec_merge:SUBST_V
+ (match_operand:SUBST_V 1)
+ (match_operand:SUBST_V 2)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ [(set (match_dup 0)
+ (unspec:SUBST_V [
+ (vec_merge:SUBST_V
+ (match_dup 1)
+ (match_dup 2)
+ (const_int 3))
+ (match_operand:SI 3 "const_4_or_8_to_11_operand")]
+ UNSPEC_EMBEDDED_ROUNDING))])
+
(define_subst_attr "round_saeonly_scalar_name" "round_saeonly_scalar" "" "_round")
(define_subst_attr "round_saeonly_scalar_mask_operand3" "mask_scalar" "%r3" "%r5")
(define_subst_attr "round_saeonly_scalar_mask_operand4" "mask_scalar" "%r4" "%r6")
@@ -797,6 +797,16 @@
#define __builtin_ia32_vfmulcph_v32hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph_v32hf_mask_round(A, C, D, B, 8)
#define __builtin_ia32_vfcmulcph_v32hf_round(A, B, C) __builtin_ia32_vfcmulcph_v32hf_round(A, B, 8)
#define __builtin_ia32_vfcmulcph_v32hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph_v32hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_round(A, B, C, D) __builtin_ia32_vfmaddcsh_v8hf_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_v8hf_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_v8hf_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_v8hf_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_v8hf_round(A, B, C) __builtin_ia32_vfmulcsh_v8hf_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_v8hf_round(A, B, C) __builtin_ia32_vfcmulcsh_v8hf_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_v8hf_mask_round(A, C, D, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
@@ -814,6 +814,16 @@
#define __builtin_ia32_vfmulcph_v32hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph_v32hf_mask_round(A, C, D, B, 8)
#define __builtin_ia32_vfcmulcph_v32hf_round(A, B, C) __builtin_ia32_vfcmulcph_v32hf_round(A, B, 8)
#define __builtin_ia32_vfcmulcph_v32hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph_v32hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_round(A, B, C, D) __builtin_ia32_vfmaddcsh_v8hf_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_v8hf_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_v8hf_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_v8hf_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_v8hf_round(A, B, C) __builtin_ia32_vfmulcsh_v8hf_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_v8hf_round(A, B, C) __builtin_ia32_vfcmulcsh_v8hf_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_v8hf_mask_round(A, C, D, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)
@@ -774,6 +774,8 @@ test_2 (_mm_cvt_roundi32_sh, __m128h, __m128h, int, 8)
test_2 (_mm_cvt_roundu32_sh, __m128h, __m128h, unsigned, 8)
test_2 (_mm512_fmul_round_pch, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_fcmul_round_pch, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_fmul_round_sch, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_fcmul_round_sch, __m128h, __m128h, __m128h, 8)
test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -850,8 +852,12 @@ test_3 (_mm_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
test_3 (_mm512_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
test_3 (_mm512_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
+test_3 (_mm_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
+test_3 (_mm_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_fmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
test_3 (_mm512_maskz_fcmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
+test_3 (_mm_maskz_fmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_fcmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
test_3x (_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8)
@@ -920,8 +926,16 @@ test_4 (_mm512_mask3_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmas
test_4 (_mm512_mask3_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmask16, 8)
test_4 (_mm512_maskz_fmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
test_4 (_mm512_maskz_fcmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask3_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_mask3_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_maskz_fmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
+test_4 (_mm_maskz_fcmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
test_4 (_mm512_mask_fmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
test_4 (_mm512_mask_fcmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1)
@@ -878,6 +878,8 @@ test_2 (_mm_cvt_roundss_sh, __m128h, __m128h, __m128, 8)
test_2 (_mm_cvt_roundsd_sh, __m128h, __m128h, __m128d, 8)
test_2 (_mm512_fmul_round_pch, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_fcmul_round_pch, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_fmul_round_sch, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_fcmul_round_sch, __m128h, __m128h, __m128h, 8)
test_2x (_mm512_cmp_round_ph_mask, __mmask32, __m512h, __m512h, 1, 8)
test_2x (_mm_cmp_round_sh_mask, __mmask8, __m128h, __m128h, 1, 8)
test_2x (_mm_comi_round_sh, int, __m128h, __m128h, 1, 8)
@@ -954,6 +956,10 @@ test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9)
test_3 (_mm512_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
test_3 (_mm512_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, 8)
test_3 (_mm512_maskz_fmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
+test_3 (_mm_maskz_fmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_fcmul_round_sch, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
+test_3 (_mm_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_fcmul_round_pch, __m512h, __mmask16, __m512h, __m512h, 8)
test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8)
test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8)
@@ -1022,8 +1028,16 @@ test_4 (_mm512_mask3_fmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmas
test_4 (_mm512_mask3_fcmadd_round_pch, __m512h, __m512h, __m512h, __m512h, __mmask16, 8)
test_4 (_mm512_maskz_fmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
test_4 (_mm512_maskz_fcmadd_round_pch, __m512h, __mmask16, __m512h, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmadd_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask3_fmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_mask3_fcmadd_round_sch, __m128h, __m128h, __m128h, __m128h, __mmask8, 8)
+test_4 (_mm_maskz_fmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
+test_4 (_mm_maskz_fcmadd_round_sch, __m128h, __mmask8, __m128h, __m128h, __m128h, 8)
test_4 (_mm512_mask_fmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
test_4 (_mm512_mask_fcmul_round_pch, __m512h, __m512h, __mmask16, __m512h, __m512h, 8)
+test_4 (_mm_mask_fmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_fcmul_round_sch, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8)
test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1)
@@ -815,6 +815,16 @@
#define __builtin_ia32_vfmulcph_v32hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcph_v32hf_mask_round(A, C, D, B, 8)
#define __builtin_ia32_vfcmulcph_v32hf_round(A, B, C) __builtin_ia32_vfcmulcph_v32hf_round(A, B, 8)
#define __builtin_ia32_vfcmulcph_v32hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcph_v32hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_round(A, B, C, D) __builtin_ia32_vfmaddcsh_v8hf_round(A, B, C, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmaddcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfmaddcsh_v8hf_maskz_round(B, C, D, A, E) __builtin_ia32_vfmaddcsh_v8hf_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_round(A, B, C, D) __builtin_ia32_vfcmaddcsh_v8hf_round(A, B, C, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmaddcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmaddcsh_v8hf_maskz_round(B, C, D, A, E) __builtin_ia32_vfcmaddcsh_v8hf_maskz_round(B, C, D, A, 8)
+#define __builtin_ia32_vfmulcsh_v8hf_round(A, B, C) __builtin_ia32_vfmulcsh_v8hf_round(A, B, 8)
+#define __builtin_ia32_vfmulcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfmulcsh_v8hf_mask_round(A, C, D, B, 8)
+#define __builtin_ia32_vfcmulcsh_v8hf_round(A, B, C) __builtin_ia32_vfcmulcsh_v8hf_round(A, B, 8)
+#define __builtin_ia32_vfcmulcsh_v8hf_mask_round(A, C, D, B, E) __builtin_ia32_vfcmulcsh_v8hf_mask_round(A, C, D, B, 8)
/* avx512fp16vlintrin.h */
#define __builtin_ia32_vcmpph_v8hf_mask(A, B, C, D) __builtin_ia32_vcmpph_v8hf_mask(A, B, 1, D)