diff mbox series

[1/2] Enable vectorization for V2HF/V4HF rounding operations and sqrt.

Message ID 20231012060209.4130200-1-hongtao.liu@intel.com
State New
Headers show
Series [1/2] Enable vectorization for V2HF/V4HF rounding operations and sqrt. | expand

Commit Message

Liu, Hongtao Oct. 12, 2023, 6:02 a.m. UTC
lrint/lround/lceil/lfloor are not vectorized due to a vectorization
restriction: when the input element size differs from the output element size,
vectorization relies on the old TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
hook instead of the modern standard pattern names. This patch only adds the
standard pattern names; it doesn't update ix86_builtin_vectorized_function.

Bootstrapped and regtested on x86_64-pc-linux-gnu{-m32,}.
Ready to push to trunk.

gcc/ChangeLog:

	* config/i386/i386-expand.cc (ix86_sse_copysign_to_positive):
	Handle HFmode.
	(ix86_expand_round_sse4): Ditto.
	* config/i386/i386.md (roundhf2): New expander.
	(lroundhf<mode>2): Ditto.
	(lrinthf<mode>2): Ditto.
	(l<rounding_insn>hf<mode>2): Ditto.
	* config/i386/mmx.md (sqrt<mode>2): Ditto.
	(btrunc<mode>2): Ditto.
	(nearbyint<mode>2): Ditto.
	(rint<mode>2): Ditto.
	(lrint<mode><mmxintvecmodelower>2): Ditto.
	(floor<mode>2): Ditto.
	(lfloor<mode><mmxintvecmodelower>2): Ditto.
	(ceil<mode>2): Ditto.
	(lceil<mode><mmxintvecmodelower>2): Ditto.
	(round<mode>2): Ditto.
	(lround<mode><mmxintvecmodelower>2): Ditto.
	* config/i386/sse.md (lrint<mode><sseintvecmodelower>2): Ditto.
	(lfloor<mode><sseintvecmodelower>2): Ditto.
	(lceil<mode><sseintvecmodelower>2): Ditto.
	(lround<mode><sseintvecmodelower>2): Ditto.
	(sse4_1_round<ssescalarmodesuffix>): Extend to V8HF.
	(round<mode>2): Extend to V8HF/V16HF/V32HF.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/part-vect-roundhf.c: New test.
	* gcc.target/i386/part-vect-sqrtph-1.c: New test.
---
 gcc/config/i386/i386-expand.cc                |   6 +
 gcc/config/i386/i386.md                       |  38 +++
 gcc/config/i386/mmx.md                        | 191 ++++++++++++++-
 gcc/config/i386/sse.md                        |  60 ++++-
 .../gcc.target/i386/part-vect-roundhf.c       | 217 ++++++++++++++++++
 .../gcc.target/i386/part-vect-sqrtph-1.c      |  20 ++
 6 files changed, 521 insertions(+), 11 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/i386/part-vect-roundhf.c
 create mode 100644 gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c
diff mbox series

Patch

diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index 425f3531862..b81b5cc030c 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -18434,6 +18434,8 @@  ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
 	vmode = V4SFmode;
       else if (mode == DFmode)
 	vmode = V2DFmode;
+      else if (mode == HFmode)
+	vmode = V8HFmode;
       else
 	vmode = mode;
 
@@ -18970,6 +18972,10 @@  ix86_expand_round_sse4 (rtx op0, rtx op1)
 
   switch (mode)
     {
+    case E_HFmode:
+      gen_copysign = gen_copysignhf3;
+      gen_round = gen_sse4_1_roundhf2;
+      break;
     case E_SFmode:
       gen_copysign = gen_copysignsf3;
       gen_round = gen_sse4_1_roundsf2;
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 65a0dd025c7..41173cb3452 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -21741,6 +21741,15 @@  (define_expand "nearbyint<mode>2"
   DONE;
 })
 
+(define_expand "roundhf2"
+  [(match_operand:HF 0 "register_operand")
+   (match_operand:HF 1 "register_operand")]
+  "TARGET_AVX512FP16 && !flag_trapping_math && !flag_rounding_math"
+{
+  ix86_expand_round_sse4 (operands[0], operands[1]);
+  DONE;
+})
+
 (define_expand "round<mode>2"
   [(match_operand:X87MODEF 0 "register_operand")
    (match_operand:X87MODEF 1 "nonimmediate_operand")]
@@ -21792,6 +21801,22 @@  (define_insn "lrintxf<mode>2"
   [(set_attr "type" "fpspc")
    (set_attr "mode" "<MODE>")])
 
+(define_expand "lroundhf<mode>2"
+  [(set (match_operand:SWI248 0 "register_operand")
+     (unspec:SWI248 [(match_operand:HF 1 "nonimmediate_operand")]
+		   UNSPEC_FIX_NOTRUNC))]
+  "TARGET_AVX512FP16 && !flag_trapping_math && !flag_rounding_math"
+{
+  ix86_expand_lround (operands[0], operands[1]);
+  DONE;
+})
+
+(define_expand "lrinthf<mode>2"
+  [(set (match_operand:SWI48 0 "register_operand")
+     (unspec:SWI48 [(match_operand:HF 1 "nonimmediate_operand")]
+		   UNSPEC_FIX_NOTRUNC))]
+  "TARGET_AVX512FP16")
+
 (define_expand "lrint<MODEF:mode><SWI48:mode>2"
   [(set (match_operand:SWI48 0 "register_operand")
      (unspec:SWI48 [(match_operand:MODEF 1 "nonimmediate_operand")]
@@ -22034,6 +22059,19 @@  (define_expand "l<rounding_insn>xf<mode>2"
    && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
    && flag_unsafe_math_optimizations")
 
+(define_expand "l<rounding_insn>hf<mode>2"
+  [(set (match_operand:SWI48 0 "nonimmediate_operand")
+	(unspec:SWI48 [(match_operand:HF 1 "register_operand")]
+		    FIST_ROUNDING))]
+  "TARGET_AVX512FP16"
+{
+  rtx tmp = gen_reg_rtx (HFmode);
+  emit_insn (gen_sse4_1_roundhf2 (tmp, operands[1],
+				 GEN_INT (ROUND_<ROUNDING> | ROUND_NO_EXC)));
+  emit_insn (gen_fix_trunchf<mode>2 (operands[0], tmp));
+  DONE;
+})
+
 (define_expand "l<rounding_insn><MODEF:mode><SWI48:mode>2"
   [(parallel [(set (match_operand:SWI48 0 "nonimmediate_operand")
 		   (unspec:SWI48 [(match_operand:MODEF 1 "register_operand")]
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index c84a37a8444..8375100d4bf 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -103,7 +103,8 @@  (define_mode_attr mmxintvecmode
    (V4HF "V4HF") (V2HF "V2HI")])
 
 (define_mode_attr mmxintvecmodelower
-  [(V2SF "v2si") (V2SI "v2si") (V4HI "v4hi") (V8QI "v8qi")])
+  [(V2SF "v2si") (V2SI "v2si") (V4HI "v4hi") (V8QI "v8qi")
+   (V4HF "v4hi") (V2HF "v2hi")])
 
 ;; Mapping of vector modes to a vector mode of double size
 (define_mode_attr mmxdoublevecmode
@@ -2053,6 +2054,21 @@  (define_expand "<code><mode>3"
   DONE;
 })
 
+(define_expand "sqrt<mode>2"
+  [(set (match_operand:VHF_32_64 0 "register_operand")
+	(sqrt:VHF_32_64
+	  (match_operand:VHF_32_64 1 "nonimmediate_operand")))]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL && ix86_partial_vec_fp_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_sqrtv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+  DONE;
+})
+
 (define_expand "<code><mode>2"
   [(set (match_operand:VHF_32_64 0 "register_operand")
 	(absneg:VHF_32_64
@@ -2088,6 +2104,179 @@  (define_insn_and_split "*mmx_nabs<mode>2"
   [(set (match_dup 0)
 	(ior:<MODE> (match_dup 1) (match_dup 2)))])
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel half-precision floating point rounding operations.
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "btrunc<mode>2"
+  [(match_operand:VHF_32_64 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_btruncv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "nearbyint<mode>2"
+  [(match_operand:VHF_32_64 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_nearbyintv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "rint<mode>2"
+  [(match_operand:VHF_32_64 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_rintv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "lrint<mode><mmxintvecmodelower>2"
+  [(match_operand:<mmxintvecmode> 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_lrintv8hfv8hi2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "floor<mode>2"
+  [(match_operand:VHF_32_64 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_floorv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "lfloor<mode><mmxintvecmodelower>2"
+  [(match_operand:<mmxintvecmode> 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_lfloorv8hfv8hi2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "ceil<mode>2"
+  [(match_operand:VHF_32_64 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_ceilv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "lceil<mode><mmxintvecmodelower>2"
+  [(match_operand:<mmxintvecmode> 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_lceilv8hfv8hi2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "round<mode>2"
+  [(match_operand:VHF_32_64 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_roundv8hf2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
+(define_expand "lround<mode><mmxintvecmodelower>2"
+  [(match_operand:<mmxintvecmode> 0 "register_operand")
+   (match_operand:VHF_32_64 1 "nonimmediate_operand")]
+  "TARGET_AVX512FP16 && TARGET_AVX512VL
+   && ix86_partial_vec_fp_math
+   && !flag_trapping_math"
+{
+  rtx op1 = gen_reg_rtx (V8HFmode);
+  rtx op0 = gen_reg_rtx (V8HFmode);
+
+  emit_insn (gen_mov<mov_to_sse_suffix>_<mode>_to_sse (op1, operands[1]));
+  emit_insn (gen_lroundv8hfv8hi2 (op0, op1));
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8HFmode));
+
+  DONE;
+})
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;
 ;; Parallel half-precision floating point logical operations
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 22e43eb3f92..4602edf2374 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -7092,6 +7092,13 @@  (define_expand "vec_unpacks_hi_<mode>"
   DONE;
 })
 
+(define_expand "lrint<mode><sseintvecmodelower>2"
+  [(set (match_operand:<sseintvecmode> 0 "register_operand")
+	(unspec:<sseintvecmode>
+	  [(match_operand:VHF_AVX512VL 1 "register_operand")]
+	 UNSPEC_FIX_NOTRUNC))]
+ "TARGET_AVX512FP16")
+
 (define_insn "avx512fp16_vcvtph2<sseintconvertsignprefix><sseintconvert>_<mode><mask_name><round_name>"
   [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v")
         (unspec:VI248_AVX512VL
@@ -24183,13 +24190,13 @@  (define_expand "<sse4_1>_round<ssemodesuffix>_vec_pack_sfix<avxsizesuffix>"
 })
 
 (define_insn "sse4_1_round<ssescalarmodesuffix>"
-  [(set (match_operand:VF_128 0 "register_operand" "=Yr,*x,x,v")
-	(vec_merge:VF_128
-	  (unspec:VF_128
-	    [(match_operand:VF_128 2 "nonimmediate_operand" "Yrjm,*xjm,xjm,vm")
+  [(set (match_operand:VFH_128 0 "register_operand" "=Yr,*x,x,v")
+	(vec_merge:VFH_128
+	  (unspec:VFH_128
+	    [(match_operand:VFH_128 2 "nonimmediate_operand" "Yrjm,*xjm,xjm,vm")
 	     (match_operand:SI 3 "const_0_to_15_operand")]
 	    UNSPEC_ROUND)
-	  (match_operand:VF_128 1 "register_operand" "0,0,x,v")
+	  (match_operand:VFH_128 1 "register_operand" "0,0,x,v")
 	  (const_int 1)))]
   "TARGET_SSE4_1"
 {
@@ -24201,7 +24208,7 @@  (define_insn "sse4_1_round<ssescalarmodesuffix>"
       case 2:
 	return "vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}";
       case 3:
-	if (x86_evex_reg_mentioned_p (operands, 3))
+	if (x86_evex_reg_mentioned_p (operands, 3) || <MODE>mode == V8HFmode)
 	  return "vrndscale<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}";
 	else
 	  return "vround<ssescalarmodesuffix>\t{%3, %2, %1, %0|%0, %1, %<iptr>2, %3}";
@@ -24264,6 +24271,17 @@  (define_expand "floor<mode>2"
   "TARGET_SSE4_1 && !flag_trapping_math"
   "operands[2] = GEN_INT (ROUND_FLOOR | ROUND_NO_EXC);")
 
+(define_expand "lfloor<mode><sseintvecmodelower>2"
+  [(match_operand:<sseintvecmode> 0 "register_operand")
+   (match_operand:VHF_AVX512VL 1 "nonimmediate_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math"
+{
+  rtx tmp = gen_reg_rtx (<MODE>mode);
+  emit_insn (gen_floor<mode>2 (tmp, operands[1]));
+  emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+  DONE;
+})
+
 (define_expand "lfloor<mode><sseintvecmodelower>2"
   [(match_operand:<sseintvecmode> 0 "register_operand")
    (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
@@ -24284,6 +24302,17 @@  (define_expand "ceil<mode>2"
   "TARGET_SSE4_1 && !flag_trapping_math"
   "operands[2] = GEN_INT (ROUND_CEIL | ROUND_NO_EXC);")
 
+(define_expand "lceil<mode><sseintvecmodelower>2"
+  [(match_operand:<sseintvecmode> 0 "register_operand")
+   (match_operand:VHF_AVX512VL 1 "register_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math"
+{
+  rtx tmp = gen_reg_rtx (<MODE>mode);
+  emit_insn (gen_ceil<mode>2 (tmp, operands[1]));
+  emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+  DONE;
+})
+
 (define_expand "lceil<mode><sseintvecmodelower>2"
   [(match_operand:<sseintvecmode> 0 "register_operand")
    (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
@@ -24306,11 +24335,11 @@  (define_expand "btrunc<mode>2"
 
 (define_expand "round<mode>2"
   [(set (match_dup 3)
-	(plus:VF
-	  (match_operand:VF 1 "register_operand")
+	(plus:VFH
+	  (match_operand:VFH 1 "register_operand")
 	  (match_dup 2)))
-   (set (match_operand:VF 0 "register_operand")
-	(unspec:VF
+   (set (match_operand:VFH 0 "register_operand")
+	(unspec:VFH
 	  [(match_dup 3) (match_dup 4)]
 	  UNSPEC_ROUND))]
   "TARGET_SSE4_1 && !flag_trapping_math"
@@ -24338,6 +24367,17 @@  (define_expand "round<mode>2"
   operands[4] = GEN_INT (ROUND_TRUNC);
 })
 
+(define_expand "lround<mode><sseintvecmodelower>2"
+  [(match_operand:<sseintvecmode> 0 "register_operand")
+   (match_operand:VHF_AVX512VL 1 "register_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math"
+{
+  rtx tmp = gen_reg_rtx (<MODE>mode);
+  emit_insn (gen_round<mode>2 (tmp, operands[1]));
+  emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+  DONE;
+})
+
 (define_expand "lround<mode><sseintvecmodelower>2"
   [(match_operand:<sseintvecmode> 0 "register_operand")
    (match_operand:VF1_VF2_AVX512DQ 1 "register_operand")]
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-roundhf.c b/gcc/testsuite/gcc.target/i386/part-vect-roundhf.c
new file mode 100644
index 00000000000..38235c157b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-roundhf.c
@@ -0,0 +1,217 @@ 
+/* { dg-do run { target avx512fp16 } } */
+/* { dg-options "-O1 -mavx512fp16 -mavx512vl -fdump-tree-slp-details -fdump-tree-optimized" } */
+
+extern void abort ();
+
+static void do_test (void);
+
+#define DO_TEST do_test
+#define AVX512FP16
+#include "avx512-check.h"
+
+#define N 16
+_Float16 b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
+		 -9.0f, 1.0f, -2.0f, 3.0f,
+		 -4.0f, -5.0f, 6.0f, 7.0f,
+		 -8.0f, -9.0f, 10.0f, 11.0f};
+_Float16 r[N];
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+round_32 (void)
+{
+  r[0] = __builtin_roundf16 (b[0]);
+  r[1] = __builtin_roundf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+round_64 (void)
+{
+  r[0] =  __builtin_roundf16 (b[0]);
+  r[1] =  __builtin_roundf16 (b[1]);
+  r[2] =  __builtin_roundf16 (b[2]);
+  r[3] =  __builtin_roundf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+rint_32 (void)
+{
+  r[0] = __builtin_rintf16 (b[0]);
+  r[1] = __builtin_rintf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+rint_64 (void)
+{
+  r[0] =  __builtin_rintf16 (b[0]);
+  r[1] =  __builtin_rintf16 (b[1]);
+  r[2] =  __builtin_rintf16 (b[2]);
+  r[3] =  __builtin_rintf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+nearbyint_32 (void)
+{
+  r[0] = __builtin_nearbyintf16 (b[0]);
+  r[1] = __builtin_nearbyintf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("O2")))
+nearbyint_64 (void)
+{
+  r[0] =  __builtin_nearbyintf16 (b[0]);
+  r[1] =  __builtin_nearbyintf16 (b[1]);
+  r[2] =  __builtin_nearbyintf16 (b[2]);
+  r[3] =  __builtin_nearbyintf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+trunc_32 (void)
+{
+  r[0] = __builtin_truncf16 (b[0]);
+  r[1] = __builtin_truncf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+trunc_64 (void)
+{
+  r[0] =  __builtin_truncf16 (b[0]);
+  r[1] =  __builtin_truncf16 (b[1]);
+  r[2] =  __builtin_truncf16 (b[2]);
+  r[3] =  __builtin_truncf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+floor_32 (void)
+{
+  r[0] =  __builtin_floorf16 (b[0]);
+  r[1] =  __builtin_floorf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+floor_64 (void)
+{
+  r[0] =  __builtin_floorf16 (b[0]);
+  r[1] =  __builtin_floorf16 (b[1]);
+  r[2] =  __builtin_floorf16 (b[2]);
+  r[3] =  __builtin_floorf16 (b[3]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+ceil_32 (void)
+{
+  r[0] =  __builtin_ceilf16 (b[0]);
+  r[1] =  __builtin_ceilf16 (b[1]);
+}
+
+void
+__attribute__((noipa,noinline,optimize("Ofast")))
+ceil_64 (void)
+{
+  r[0] =  __builtin_ceilf16 (b[0]);
+  r[1] =  __builtin_ceilf16 (b[1]);
+  r[2] =  __builtin_ceilf16 (b[2]);
+  r[3] =  __builtin_ceilf16 (b[3]);
+}
+
+_Float16
+__attribute__((noipa,noinline,optimize("Ofast")))
+dummy_roundf16 (_Float16 a)
+{
+  return __builtin_roundf16 (a);
+}
+static void
+__attribute__ ((noinline, noclone))
+do_test (void)
+{
+  round_32 ();
+  /* check results:  */
+  for (int i = 0; i != 2; i++)
+    if (r[i] != dummy_roundf16 (b[i]))
+      abort ();
+
+  round_64 ();
+  /* check results:  */
+  for (int i = 0; i != 4; i++)
+    if (r[i] != dummy_roundf16 (b[i]))
+      abort ();
+
+  rint_32 ();
+  /* check results:  */
+  for (int i = 0; i != 2; i++)
+    if (r[i] != __builtin_rintf16 (b[i]))
+      abort ();
+
+  rint_64 ();
+  /* check results:  */
+  for (int i = 0; i != 4; i++)
+    if (r[i] != __builtin_rintf16 (b[i]))
+      abort ();
+
+  nearbyint_32 ();
+  /* check results:  */
+  for (int i = 0; i != 2; i++)
+    if (r[i] != __builtin_nearbyintf16 (b[i]))
+      abort ();
+
+  nearbyint_64 ();
+  /* check results:  */
+  for (int i = 0; i != 4; i++)
+    if (r[i] != __builtin_nearbyintf16 (b[i]))
+      abort ();
+
+  trunc_32 ();
+  /* check results:  */
+  for (int i = 0; i != 2; i++)
+    if (r[i] != __builtin_truncf16 (b[i]))
+      abort ();
+
+  trunc_64 ();
+  /* check results:  */
+  for (int i = 0; i != 4; i++)
+    if (r[i] != __builtin_truncf16 (b[i]))
+      abort ();
+
+  floor_32 ();
+  /* check results:  */
+  for (int i = 0; i != 2; i++)
+    if (r[i] != __builtin_floorf16 (b[i]))
+      abort ();
+
+  floor_64 ();
+  /* check results:  */
+  for (int i = 0; i != 4; i++)
+    if (r[i] != __builtin_floorf16 (b[i]))
+      abort ();
+
+  ceil_32 ();
+  /* check results:  */
+  for (int i = 0; i != 2; i++)
+    if (r[i] != __builtin_ceilf16 (b[i]))
+      abort ();
+
+  ceil_64 ();
+  /* check results:  */
+  for (int i = 0; i != 4; i++)
+    if (r[i] != __builtin_ceilf16 (b[i]))
+      abort ();
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized using 8 byte vectors" 6 "slp2" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times "vectorized using 4 byte vectors" 6 "slp2" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).CEIL \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).FLOOR \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).ROUND \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).RINT \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).NEARBYINT \(vect} 2 "optimized" { target { ! ia32 } } } } */
+/* { dg-final { scan-tree-dump-times {(?n).TRUNC \(vect} 2 "optimized" { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c b/gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c
new file mode 100644
index 00000000000..b7f9e7fb9b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/part-vect-sqrtph-1.c
@@ -0,0 +1,20 @@ 
+/* { dg-do compile } */
+/* { dg-options "-mavx512fp16 -mavx512vl -Ofast" } */
+/* { dg-final { scan-assembler-times {(?n)vsqrtph[ \t].*%xmm[0-9]} 2 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times {(?n)vsqrtph[ \t].*%xmm[0-9]} 2 { target { ! ia32 } } } } */
+
+void
+foo16_sqrt (_Float16* a, _Float16* __restrict c)
+{
+  c[0] = __builtin_sqrtf16 (a[0]);
+  c[1] = __builtin_sqrtf16 (a[1]);
+}
+
+void
+foo32_sqrt(_Float16* a, _Float16* __restrict c)
+{
+  c[0] = __builtin_sqrtf16 (a[0]);
+  c[1] = __builtin_sqrtf16 (a[1]);
+  c[2] = __builtin_sqrtf16 (a[2]);
+  c[3] = __builtin_sqrtf16 (a[3]);
+}