diff mbox

[11/n] Remove GENERIC stmt combining from SCCVN

Message ID alpine.LSU.2.11.1507021603441.9923@zhemvz.fhfr.qr
State New
Headers show

Commit Message

Richard Biener July 2, 2015, 2:05 p.m. UTC
This moves floating-point related comparison foldings from
fold_comparison to match.pd.  I noticed we call fold_comparison
only for a subset of tcc_comparison - the newly introduced
simple_comparison operator list and changes to present patterns
reflect that.

Bootstrapped on x86_64-unknown-linux-gnu, testing in progress.

Richard.

2015-07-02  Richard Biener  <rguenther@suse.de>

	* fold-const.c (fold_mathfn_compare): Remove.
	(fold_inf_compare): Likewise.
	(fold_comparison): Move floating point comparison simplifications...
	* match.pd: ... to patterns here.  Introduce simple_comparison
	operator list and use it for patterns formerly in fold_comparison.

Comments

H.J. Lu July 5, 2015, 8:24 p.m. UTC | #1
On Thu, Jul 2, 2015 at 7:05 AM, Richard Biener <rguenther@suse.de> wrote:
>
> This moves floating-point related comparison foldings from
> fold_comparison to match.pd.  I noticed we call fold_comparison
> only for a subset of tcc_comparison - the newly introduced
> simple_comparison operator list and changes to present patterns
> reflect that.
>
> Bootstrapped on x86_64-unknown-linux-gnu, testing in progress.
>
> Richard.
>
> 2015-07-02  Richard Biener  <rguenther@suse.de>
>
>         * fold-const.c (fold_mathfn_compare): Remove.
>         (fold_inf_compare): Likewise.
>         (fold_comparison): Move floating point comparison simplifications...
>         * match.pd: ... to patterns here.  Introduce simple_comparisons
>         operator list and use it for patterns formerly in fold_comparison.
>

This caused:

https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66770
diff mbox

Patch

Index: gcc/fold-const.c
===================================================================
--- gcc/fold-const.c	(revision 225305)
+++ gcc/fold-const.c	(working copy)
@@ -145,10 +145,6 @@  static tree fold_binary_op_with_conditio
 						 enum tree_code, tree,
 						 tree, tree,
 						 tree, tree, int);
-static tree fold_mathfn_compare (location_t,
-				 enum built_in_function, enum tree_code,
-				 tree, tree, tree);
-static tree fold_inf_compare (location_t, enum tree_code, tree, tree, tree);
 static tree fold_div_compare (location_t, enum tree_code, tree, tree, tree);
 static bool reorder_operands_p (const_tree, const_tree);
 static tree fold_negate_const (tree, tree);
@@ -6418,199 +6414,6 @@  fold_real_zero_addition_p (const_tree ty
   return negate && !HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type));
 }
 
-/* Subroutine of fold() that checks comparisons of built-in math
-   functions against real constants.
-
-   FCODE is the DECL_FUNCTION_CODE of the built-in, CODE is the comparison
-   operator: EQ_EXPR, NE_EXPR, GT_EXPR, LT_EXPR, GE_EXPR or LE_EXPR.  TYPE
-   is the type of the result and ARG0 and ARG1 are the operands of the
-   comparison.  ARG1 must be a TREE_REAL_CST.
-
-   The function returns the constant folded tree if a simplification
-   can be made, and NULL_TREE otherwise.  */
-
-static tree
-fold_mathfn_compare (location_t loc,
-		     enum built_in_function fcode, enum tree_code code,
-		     tree type, tree arg0, tree arg1)
-{
-  REAL_VALUE_TYPE c;
-
-  if (BUILTIN_SQRT_P (fcode))
-    {
-      tree arg = CALL_EXPR_ARG (arg0, 0);
-      machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
-
-      c = TREE_REAL_CST (arg1);
-      if (REAL_VALUE_NEGATIVE (c))
-	{
-	  /* sqrt(x) < y is always false, if y is negative.  */
-	  if (code == EQ_EXPR || code == LT_EXPR || code == LE_EXPR)
-	    return omit_one_operand_loc (loc, type, integer_zero_node, arg);
-
-	  /* sqrt(x) > y is always true, if y is negative and we
-	     don't care about NaNs, i.e. negative values of x.  */
-	  if (code == NE_EXPR || !HONOR_NANS (mode))
-	    return omit_one_operand_loc (loc, type, integer_one_node, arg);
-
-	  /* sqrt(x) > y is the same as x >= 0, if y is negative.  */
-	  return fold_build2_loc (loc, GE_EXPR, type, arg,
-			      build_real (TREE_TYPE (arg), dconst0));
-	}
-      else if (code == GT_EXPR || code == GE_EXPR)
-	{
-	  REAL_VALUE_TYPE c2;
-
-	  REAL_ARITHMETIC (c2, MULT_EXPR, c, c);
-	  real_convert (&c2, mode, &c2);
-
-	  if (REAL_VALUE_ISINF (c2))
-	    {
-	      /* sqrt(x) > y is x == +Inf, when y is very large.  */
-	      if (HONOR_INFINITIES (mode))
-		return fold_build2_loc (loc, EQ_EXPR, type, arg,
-				    build_real (TREE_TYPE (arg), c2));
-
-	      /* sqrt(x) > y is always false, when y is very large
-		 and we don't care about infinities.  */
-	      return omit_one_operand_loc (loc, type, integer_zero_node, arg);
-	    }
-
-	  /* sqrt(x) > c is the same as x > c*c.  */
-	  return fold_build2_loc (loc, code, type, arg,
-			      build_real (TREE_TYPE (arg), c2));
-	}
-      else if (code == LT_EXPR || code == LE_EXPR)
-	{
-	  REAL_VALUE_TYPE c2;
-
-	  REAL_ARITHMETIC (c2, MULT_EXPR, c, c);
-	  real_convert (&c2, mode, &c2);
-
-	  if (REAL_VALUE_ISINF (c2))
-	    {
-	      /* sqrt(x) < y is always true, when y is a very large
-		 value and we don't care about NaNs or Infinities.  */
-	      if (! HONOR_NANS (mode) && ! HONOR_INFINITIES (mode))
-		return omit_one_operand_loc (loc, type, integer_one_node, arg);
-
-	      /* sqrt(x) < y is x != +Inf when y is very large and we
-		 don't care about NaNs.  */
-	      if (! HONOR_NANS (mode))
-		return fold_build2_loc (loc, NE_EXPR, type, arg,
-				    build_real (TREE_TYPE (arg), c2));
-
-	      /* sqrt(x) < y is x >= 0 when y is very large and we
-		 don't care about Infinities.  */
-	      if (! HONOR_INFINITIES (mode))
-		return fold_build2_loc (loc, GE_EXPR, type, arg,
-				    build_real (TREE_TYPE (arg), dconst0));
-
-	      /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large.  */
-	      arg = save_expr (arg);
-	      return fold_build2_loc (loc, TRUTH_ANDIF_EXPR, type,
-				  fold_build2_loc (loc, GE_EXPR, type, arg,
-					       build_real (TREE_TYPE (arg),
-							   dconst0)),
-				  fold_build2_loc (loc, NE_EXPR, type, arg,
-					       build_real (TREE_TYPE (arg),
-							   c2)));
-	    }
-
-	  /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs.  */
-	  if (! HONOR_NANS (mode))
-	    return fold_build2_loc (loc, code, type, arg,
-				build_real (TREE_TYPE (arg), c2));
-
-	  /* sqrt(x) < c is the same as x >= 0 && x < c*c.  */
-	  arg = save_expr (arg);
-	  return fold_build2_loc (loc, TRUTH_ANDIF_EXPR, type,
-				  fold_build2_loc (loc, GE_EXPR, type, arg,
-					       build_real (TREE_TYPE (arg),
-							   dconst0)),
-				  fold_build2_loc (loc, code, type, arg,
-					       build_real (TREE_TYPE (arg),
-							   c2)));
-	}
-    }
-
-  return NULL_TREE;
-}
-
-/* Subroutine of fold() that optimizes comparisons against Infinities,
-   either +Inf or -Inf.
-
-   CODE is the comparison operator: EQ_EXPR, NE_EXPR, GT_EXPR, LT_EXPR,
-   GE_EXPR or LE_EXPR.  TYPE is the type of the result and ARG0 and ARG1
-   are the operands of the comparison.  ARG1 must be a TREE_REAL_CST.
-
-   The function returns the constant folded tree if a simplification
-   can be made, and NULL_TREE otherwise.  */
-
-static tree
-fold_inf_compare (location_t loc, enum tree_code code, tree type,
-		  tree arg0, tree arg1)
-{
-  machine_mode mode;
-  REAL_VALUE_TYPE max;
-  tree temp;
-  bool neg;
-
-  mode = TYPE_MODE (TREE_TYPE (arg0));
-
-  /* For negative infinity swap the sense of the comparison.  */
-  neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg1));
-  if (neg)
-    code = swap_tree_comparison (code);
-
-  switch (code)
-    {
-    case GT_EXPR:
-      /* x > +Inf is always false, if with ignore sNANs.  */
-      if (HONOR_SNANS (mode))
-        return NULL_TREE;
-      return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
-
-    case LE_EXPR:
-      /* x <= +Inf is always true, if we don't case about NaNs.  */
-      if (! HONOR_NANS (mode))
-	return omit_one_operand_loc (loc, type, integer_one_node, arg0);
-
-      /* x <= +Inf is the same as x == x, i.e. isfinite(x).  */
-      arg0 = save_expr (arg0);
-      return fold_build2_loc (loc, EQ_EXPR, type, arg0, arg0);
-
-    case EQ_EXPR:
-    case GE_EXPR:
-      /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX.  */
-      real_maxval (&max, neg, mode);
-      return fold_build2_loc (loc, neg ? LT_EXPR : GT_EXPR, type,
-			  arg0, build_real (TREE_TYPE (arg0), max));
-
-    case LT_EXPR:
-      /* x < +Inf is always equal to x <= DBL_MAX.  */
-      real_maxval (&max, neg, mode);
-      return fold_build2_loc (loc, neg ? GE_EXPR : LE_EXPR, type,
-			  arg0, build_real (TREE_TYPE (arg0), max));
-
-    case NE_EXPR:
-      /* x != +Inf is always equal to !(x > DBL_MAX).  */
-      real_maxval (&max, neg, mode);
-      if (! HONOR_NANS (mode))
-	return fold_build2_loc (loc, neg ? GE_EXPR : LE_EXPR, type,
-			    arg0, build_real (TREE_TYPE (arg0), max));
-
-      temp = fold_build2_loc (loc, neg ? LT_EXPR : GT_EXPR, type,
-			  arg0, build_real (TREE_TYPE (arg0), max));
-      return fold_build1_loc (loc, TRUTH_NOT_EXPR, type, temp);
-
-    default:
-      break;
-    }
-
-  return NULL_TREE;
-}
-
 /* Subroutine of fold() that optimizes comparisons of a division by
    a nonzero integer constant against an integer constant, i.e.
    X/C1 op C2.
@@ -9075,95 +8829,6 @@  fold_comparison (location_t loc, enum tr
   if (tem)
     return tem;
 
-  if (FLOAT_TYPE_P (TREE_TYPE (arg0)))
-    {
-      tree targ0 = strip_float_extensions (arg0);
-      tree targ1 = strip_float_extensions (arg1);
-      tree newtype = TREE_TYPE (targ0);
-
-      if (TYPE_PRECISION (TREE_TYPE (targ1)) > TYPE_PRECISION (newtype))
-	newtype = TREE_TYPE (targ1);
-
-      /* Fold (double)float1 CMP (double)float2 into float1 CMP float2.  */
-      if (TYPE_PRECISION (newtype) < TYPE_PRECISION (TREE_TYPE (arg0)))
-	return fold_build2_loc (loc, code, type,
-			    fold_convert_loc (loc, newtype, targ0),
-			    fold_convert_loc (loc, newtype, targ1));
-
-      if (TREE_CODE (arg1) == REAL_CST)
-	{
-	  REAL_VALUE_TYPE cst;
-	  cst = TREE_REAL_CST (arg1);
-
-	  /* IEEE doesn't distinguish +0 and -0 in comparisons.  */
-	  /* a CMP (-0) -> a CMP 0  */
-	  if (REAL_VALUE_MINUS_ZERO (cst))
-	    return fold_build2_loc (loc, code, type, arg0,
-				build_real (TREE_TYPE (arg1), dconst0));
-
-	  /* x != NaN is always true, other ops are always false.  */
-	  if (REAL_VALUE_ISNAN (cst)
-	      && ! HONOR_SNANS (arg1))
-	    {
-	      tem = (code == NE_EXPR) ? integer_one_node : integer_zero_node;
-	      return omit_one_operand_loc (loc, type, tem, arg0);
-	    }
-
-	  /* Fold comparisons against infinity.  */
-	  if (REAL_VALUE_ISINF (cst)
-	      && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg1))))
-	    {
-	      tem = fold_inf_compare (loc, code, type, arg0, arg1);
-	      if (tem != NULL_TREE)
-		return tem;
-	    }
-	}
-
-      /* If this is a comparison of a real constant with a PLUS_EXPR
-	 or a MINUS_EXPR of a real constant, we can convert it into a
-	 comparison with a revised real constant as long as no overflow
-	 occurs when unsafe_math_optimizations are enabled.  */
-      if (flag_unsafe_math_optimizations
-	  && TREE_CODE (arg1) == REAL_CST
-	  && (TREE_CODE (arg0) == PLUS_EXPR
-	      || TREE_CODE (arg0) == MINUS_EXPR)
-	  && TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST
-	  && 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR
-				      ? MINUS_EXPR : PLUS_EXPR,
-				      arg1, TREE_OPERAND (arg0, 1)))
-	  && !TREE_OVERFLOW (tem))
-	return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
-
-      /* Likewise, we can simplify a comparison of a real constant with
-         a MINUS_EXPR whose first operand is also a real constant, i.e.
-         (c1 - x) < c2 becomes x > c1-c2.  Reordering is allowed on
-         floating-point types only if -fassociative-math is set.  */
-      if (flag_associative_math
-	  && TREE_CODE (arg1) == REAL_CST
-	  && TREE_CODE (arg0) == MINUS_EXPR
-	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST
-	  && 0 != (tem = const_binop (MINUS_EXPR, TREE_OPERAND (arg0, 0),
-				      arg1))
-	  && !TREE_OVERFLOW (tem))
-	return fold_build2_loc (loc, swap_tree_comparison (code), type,
-			    TREE_OPERAND (arg0, 1), tem);
-
-      /* Fold comparisons against built-in math functions.  */
-      if (TREE_CODE (arg1) == REAL_CST
-	  && flag_unsafe_math_optimizations
-	  && ! flag_errno_math)
-	{
-	  enum built_in_function fcode = builtin_mathfn_code (arg0);
-
-	  if (fcode != END_BUILTINS)
-	    {
-	      tem = fold_mathfn_compare (loc, fcode, code, type, arg0, arg1);
-	      if (tem != NULL_TREE)
-		return tem;
-	    }
-	}
-    }
-
   if (TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE
       && CONVERT_EXPR_P (arg0))
     {
Index: gcc/match.pd
===================================================================
--- gcc/match.pd	(revision 225305)
+++ gcc/match.pd	(working copy)
@@ -40,6 +40,19 @@  (define_operator_list inverted_tcc_compa
   unge ungt ne eq unlt unle ordered   unordered ge   gt   le   lt   ltgt uneq)
 (define_operator_list swapped_tcc_comparison
   gt   ge   eq ne le   lt   unordered ordered   ungt unge unlt unle uneq ltgt)
+(define_operator_list simple_comparison         lt   le   eq ne ge   gt)
+(define_operator_list swapped_simple_comparison gt   ge   eq ne le   lt)
+
+(define_operator_list LOG BUILT_IN_LOGF BUILT_IN_LOG BUILT_IN_LOGL)
+(define_operator_list EXP BUILT_IN_EXPF BUILT_IN_EXP BUILT_IN_EXPL)
+(define_operator_list LOG2 BUILT_IN_LOG2F BUILT_IN_LOG2 BUILT_IN_LOG2L)
+(define_operator_list EXP2 BUILT_IN_EXP2F BUILT_IN_EXP2 BUILT_IN_EXP2L)
+(define_operator_list LOG10 BUILT_IN_LOG10F BUILT_IN_LOG10 BUILT_IN_LOG10L)
+(define_operator_list EXP10 BUILT_IN_EXP10F BUILT_IN_EXP10 BUILT_IN_EXP10L)
+(define_operator_list POW BUILT_IN_POWF BUILT_IN_POW BUILT_IN_POWL)
+(define_operator_list POW10 BUILT_IN_POW10F BUILT_IN_POW10 BUILT_IN_POW10L)
+(define_operator_list SQRT BUILT_IN_SQRTF BUILT_IN_SQRT BUILT_IN_SQRTL)
+(define_operator_list CBRT BUILT_IN_CBRTF BUILT_IN_CBRT BUILT_IN_CBRTL)
 
 
 /* Simplifications of operations with one constant operand and
@@ -1314,8 +1362,8 @@  (define_operator_list swapped_tcc_compar
    signed arithmetic case.  That form is created by the compiler
    often enough for folding it to be of value.  One example is in
    computing loop trip counts after Operator Strength Reduction.  */
-(for cmp (tcc_comparison)
-     scmp (swapped_tcc_comparison)
+(for cmp (simple_comparison)
+     scmp (swapped_simple_comparison)
  (simplify
   (cmp (mult @0 INTEGER_CST@1) integer_zerop@2)
   /* Handle unfolded multiplication by zero.  */
@@ -1348,19 +1396,196 @@  (define_operator_list swapped_tcc_compar
    { constant_boolean_node (false, type); })))
 
 /* Fold ~X op ~Y as Y op X.  */
-(for cmp (tcc_comparison)
+(for cmp (simple_comparison)
  (simplify
   (cmp (bit_not @0) (bit_not @1))
   (cmp @1 @0)))
 
 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison.  */
-(for cmp (tcc_comparison)
-     scmp (swapped_tcc_comparison)
+(for cmp (simple_comparison)
+     scmp (swapped_simple_comparison)
  (simplify
   (cmp (bit_not @0) CONSTANT_CLASS_P@1)
   (if (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST)
    (scmp @0 (bit_not @1)))))
 
+(for cmp (simple_comparison)
+ /* Fold (double)float1 CMP (double)float2 into float1 CMP float2.  */
+ (simplify
+  (cmp (convert@2 @0) (convert? @1))
+  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
+       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
+	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
+       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
+	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
+   (with
+    {
+      tree type1 = TREE_TYPE (@1);
+      if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
+        {
+	  REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
+	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
+	      && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
+	    type1 = float_type_node;
+	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
+	      && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
+	    type1 = double_type_node;
+        }
+      tree newtype
+        = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
+	   ? TREE_TYPE (@0) : type1); 
+    }
+    (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
+     (cmp (convert:newtype @0) (convert:newtype @1))))))
+ 
+ (simplify
+  (cmp @0 REAL_CST@1)
+  /* IEEE doesn't distinguish +0 and -0 in comparisons.  */
+  /* a CMP (-0) -> a CMP 0  */
+  (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
+   (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
+  /* x != NaN is always true, other ops are always false.  */
+  (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
+       && ! HONOR_SNANS (@1))
+   { constant_boolean_node (cmp == NE_EXPR, type); })
+  /* Fold comparisons against infinity.  */
+  (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
+       && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
+   (with
+    {
+      REAL_VALUE_TYPE max;
+      enum tree_code code = cmp;
+      bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
+      if (neg)
+        code = swap_tree_comparison (code);
+    }
+    /* x > +Inf is always false, if with ignore sNANs.  */
+    (if (code == GT_EXPR
+    	 && ! HONOR_SNANS (@0))
+     { constant_boolean_node (false, type); })
+    (if (code == LE_EXPR)
+     /* x <= +Inf is always true, if we don't case about NaNs.  */
+     (if (! HONOR_SNANS (@0))
+      { constant_boolean_node (true, type); })
+     /* x <= +Inf is the same as x == x, i.e. isfinite(x).  */
+     (eq @0 @0))
+    /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX.  */
+    (if (code == EQ_EXPR || code == GE_EXPR)
+     (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
+      (if (neg)
+       (lt @0 { build_real (TREE_TYPE (@0), max); }))
+      (gt @0 { build_real (TREE_TYPE (@0), max); })))
+    /* x < +Inf is always equal to x <= DBL_MAX.  */
+    (if (code == LT_EXPR)
+     (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
+      (if (neg)
+       (ge @0 { build_real (TREE_TYPE (@0), max); }))
+      (le @0 { build_real (TREE_TYPE (@0), max); })))
+    /* x != +Inf is always equal to !(x > DBL_MAX).  */
+    (if (code == NE_EXPR)
+     (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
+      (if (! HONOR_NANS (@0))
+       (if (neg)
+        (ge @0 { build_real (TREE_TYPE (@0), max); }))
+       (le @0 { build_real (TREE_TYPE (@0), max); }))
+      (if (neg)
+       (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
+	        { build_one_cst (type); }))
+      (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
+       { build_one_cst (type); }))))))
+
+ /* If this is a comparison of a real constant with a PLUS_EXPR
+    or a MINUS_EXPR of a real constant, we can convert it into a
+    comparison with a revised real constant as long as no overflow
+    occurs when unsafe_math_optimizations are enabled.  */
+ (if (flag_unsafe_math_optimizations)
+  (for op (plus minus)
+   (simplify
+    (cmp (op @0 REAL_CST@1) REAL_CST@2)
+    (with
+     {
+       tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
+			       TREE_TYPE (@1), @2, @1);
+     }
+     (if (!TREE_OVERFLOW (tem))
+      (cmp @0 { tem; }))))))
+
+ /* Likewise, we can simplify a comparison of a real constant with
+    a MINUS_EXPR whose first operand is also a real constant, i.e.
+    (c1 - x) < c2 becomes x > c1-c2.  Reordering is allowed on
+    floating-point types only if -fassociative-math is set.  */
+ (if (flag_associative_math)
+  (simplify
+   (cmp (minus REAL_CST@0 @1) @2)
+   (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
+    (if (!TREE_OVERFLOW (tem))
+     (cmp { tem; } @1)))))
+
+ /* Fold comparisons against built-in math functions.  */
+ (if (flag_unsafe_math_optimizations
+      && ! flag_errno_math)
+  (for sq (SQRT)
+   (simplify
+    (cmp (sq @0) REAL_CST@1)
+    (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
+     /* sqrt(x) < y is always false, if y is negative.  */
+     (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
+      { constant_boolean_node (false, type); })
+     /* sqrt(x) > y is always true, if y is negative and we
+	don't care about NaNs, i.e. negative values of x.  */
+     (if (cmp == NE_EXPR || !HONOR_NANS (@0))
+      { constant_boolean_node (true, type); })
+     /* sqrt(x) > y is the same as x >= 0, if y is negative.  */
+     (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
+    (if (cmp == GT_EXPR || cmp == GE_EXPR)
+     (with
+      {
+       	REAL_VALUE_TYPE c2;
+	REAL_ARITHMETIC (c2, MULT_EXPR, TREE_REAL_CST (@1), TREE_REAL_CST (@1));
+	real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
+      }
+      (if (REAL_VALUE_ISINF (c2))
+       /* sqrt(x) > y is x == +Inf, when y is very large.  */
+       (if (HONOR_INFINITIES (@0))
+        (eq @0 { build_real (TREE_TYPE (@0), c2); }))
+       { constant_boolean_node (false, type); })
+      /* sqrt(x) > c is the same as x > c*c.  */
+      (cmp @0 { build_real (TREE_TYPE (@0), c2); })))
+    (if (cmp == LT_EXPR || cmp == LE_EXPR)
+     (with
+      {
+       	REAL_VALUE_TYPE c2;
+	REAL_ARITHMETIC (c2, MULT_EXPR, TREE_REAL_CST (@1), TREE_REAL_CST (@1));
+	real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
+      }
+      (if (REAL_VALUE_ISINF (c2))
+       /* sqrt(x) < y is always true, when y is a very large
+	  value and we don't care about NaNs or Infinities.  */
+       (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
+        { constant_boolean_node (true, type); })
+       /* sqrt(x) < y is x != +Inf when y is very large and we
+	  don't care about NaNs.  */
+       (if (! HONOR_NANS (@0))
+        (ne @0 { build_real (TREE_TYPE (@0), c2); }))
+       /* sqrt(x) < y is x >= 0 when y is very large and we
+	  don't care about Infinities.  */
+       (if (! HONOR_INFINITIES (@0))
+        (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
+       /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large.  */
+       (if (GENERIC)
+        (truth_andif
+	 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
+	 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
+      /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs.  */
+      (if (! REAL_VALUE_ISINF (c2)
+           && ! HONOR_NANS (@0))
+       (cmp @0 { build_real (TREE_TYPE (@0), c2); }))
+      /* sqrt(x) < c is the same as x >= 0 && x < c*c.  */
+      (if (! REAL_VALUE_ISINF (c2)
+           && GENERIC)
+       (truth_andif
+        (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
+	(cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
 
 /* Unordered tests if either argument is a NaN.  */
 (simplify
@@ -1427,18 +1677,6 @@  (define_operator_list swapped_tcc_compar
 
 /* Simplification of math builtins.  */
 
-(define_operator_list LOG BUILT_IN_LOGF BUILT_IN_LOG BUILT_IN_LOGL)
-(define_operator_list EXP BUILT_IN_EXPF BUILT_IN_EXP BUILT_IN_EXPL)
-(define_operator_list LOG2 BUILT_IN_LOG2F BUILT_IN_LOG2 BUILT_IN_LOG2L)
-(define_operator_list EXP2 BUILT_IN_EXP2F BUILT_IN_EXP2 BUILT_IN_EXP2L)
-(define_operator_list LOG10 BUILT_IN_LOG10F BUILT_IN_LOG10 BUILT_IN_LOG10L)
-(define_operator_list EXP10 BUILT_IN_EXP10F BUILT_IN_EXP10 BUILT_IN_EXP10L)
-(define_operator_list POW BUILT_IN_POWF BUILT_IN_POW BUILT_IN_POWL)
-(define_operator_list POW10 BUILT_IN_POW10F BUILT_IN_POW10 BUILT_IN_POW10L)
-(define_operator_list SQRT BUILT_IN_SQRTF BUILT_IN_SQRT BUILT_IN_SQRTL)
-(define_operator_list CBRT BUILT_IN_CBRTF BUILT_IN_CBRT BUILT_IN_CBRTL)
-
-
 /* fold_builtin_logarithm */
 (if (flag_unsafe_math_optimizations)
  /* Special case, optimize logN(expN(x)) = x.  */