diff mbox

[libquadmath] : Use built-in functions instead of fabsq, copysignq and nanq.

Message ID CAFULd4ZsA-biPG5cNs1=tN2RvicsMFbAuNOsaZjeHRnho2iaPQ@mail.gmail.com
State New
Headers show

Commit Message

Uros Bizjak June 12, 2016, 9:50 p.m. UTC
Hello!

Attached (mostly mechanical) patch uses equivalent built-in functions
for fabsq, copysignq and nanq. The patch allows more aggressive
compiler optimizations, where for fabsq and copysignq, the compiler
will emit 128bit SSE bitops, and a 128bit constant load instead of
nanq function call.

The patch also improves detection of required builtins, so for ia64,
additional patch [1] is needed.

2016-06-12  Uros Bizjak  <ubizjak@gmail.com>

    * configure.ac (__float128 support): Also test __builtin_fabsq,
    __builtin_copysignq, __builtin_infq and __builtin_nanq.
    * configure: Regenerate.
    * math/*.c: Use __builtin_fabsq instead of fabsq, __builtin_copysignq
    instead of copysignq, __builtin_infq instead of __builtin_inf and
    __builtin_nanq instead of nanq.

Patch was bootstrapped and regression tested on x86_64-linux-gnu
{,-m32}. I have also checked that no fabsq, copysignq or nanq calls
remain in the library.

OK for mainline?

[1] https://gcc.gnu.org/ml/gcc-patches/2016-06/msg00888.html

Uros.

Comments

Jakub Jelinek June 14, 2016, 6:30 a.m. UTC | #1
On Sun, Jun 12, 2016 at 11:50:11PM +0200, Uros Bizjak wrote:
> Attached (mostly mechanical) patch uses equivalent built-in functions
> for fabsq, copysignq and nanq. The patch allows more aggressive
> compiler optimizations, where for fabsq and copysignq, the compiler
> will emit 128bit SSE bitops, and a 128bit constant load instead of
> nanq function call.
> 
> The patch also improves detection of required builtins, so for ia64,
> additional patch [1] is needed.
> 
> 2016-06-12  Uros Bizjak  <ubizjak@gmail.com>
> 
>     * configure.ac (__float128 support): Also test _builtin_fabsq,
>     __builtin_copysignq, __builtin_infq and __builtin_nanq.
>     * configure: Regenerate.
>     * math/*.c: Use __builtin_fabsq instead of fabsq, __builtin_copysignq
>     instead of copysignq, __builtin_infq instead of __builtin_inf and
>     __builtin_nanq instead of nanq.
> 
> Patch was bootstrapped and regression tested on x86_64-linux-gnu
> {,-m32}. I have also checked, that no fabsq, copysignq or nanq calls
> remain in the library.

Couldn't you instead add into a header inline functions or macros
that map fabsq to __builtin_fabsq etc.?  Then you could keep the *.c
files as is.
What I don't really like on the patch is that it diverges too much from the
original libc sources (other than replacing suffixes of functions and
changing types).

	Jakub
Joseph Myers June 14, 2016, 11:11 a.m. UTC | #2
On Tue, 14 Jun 2016, Jakub Jelinek wrote:

> Couldn't you instead add into a header inline functions or macros
> that map fabsq to __builtin_fabsq etc.?  Then you could keep the *.c
> files as is.
> What I don't really like on the patch is that it diverges too much from the
> original libc sources (other than replacing suffixes of functions and
> changing types).

Cf. my point that for signbit such a header should do

#undef signbit
#define signbit(x) __builtin_signbit (x)

and likewise for fpclassify, isfinite, isnormal, isnan, isinf.  That way, 
the code can be made *closer* to current glibc sources (libquadmath was 
last updated from glibc in Nov 2012) - current sources now use those 
type-generic macros directly, where previously they called functions such 
as __signbitl, __isinfl etc. (and where libquadmath changed those into 
calls to signbitq etc.).
diff mbox

Patch

Index: configure
===================================================================
--- configure	(revision 237340)
+++ configure	(working copy)
@@ -8513,7 +8513,7 @@  _LT_EOF
 	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
 	  export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
 	else
-	  export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	  export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
 	fi
 	aix_use_runtimelinking=no
 
@@ -12514,29 +12514,53 @@  else
 
     typedef _Complex float __attribute__((mode(TC))) __complex128;
 
-    __float128 foo (__float128 x)
+    __float128 test_cplx (__float128 x)
     {
 
-     __complex128 z1, z2;
+      __complex128 z1, z2;
 
-     z1 = x;
-     z2 = x / 7.Q;
-     z2 /= z1;
+      z1 = x;
+      z2 = x / 7.Q;
+      z2 /= z1;
 
-     return (__float128) z2;
+      return (__float128) z2;
     }
 
-    __float128 bar (__float128 x)
+    __float128 test_fabs (__float128 x)
     {
+      return __builtin_fabsq (x);
+    }
+
+    __float128 test_copysign (__float128 x, __float128 y)
+    {
+      return __builtin_copysignq (x, y);
+    }
+
+    __float128 test_huge_val (__float128 x)
+    {
       return x * __builtin_huge_valq ();
     }
 
+    __float128 test_inf (__float128 x)
+    {
+      return x * __builtin_infq ();
+    }
+
+    __float128 test_nan (__float128 x)
+    {
+      return x * __builtin_nanq ("");
+    }
+
 int
 main ()
 {
 
-    foo (1.2Q);
-    bar (1.2Q);
+    test_cplx (1.2Q);
+    test_fabs (1.2Q);
+    test_copysign (1.2Q, -1.2Q);
+    test_huge_val (1.2Q);
+    test_inf (1.2Q);
+    test_nan (1.2Q);
 
   ;
   return 0;
@@ -12561,29 +12585,53 @@  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 
     typedef _Complex float __attribute__((mode(TC))) __complex128;
 
-    __float128 foo (__float128 x)
+    __float128 test_cplx (__float128 x)
     {
 
-     __complex128 z1, z2;
+      __complex128 z1, z2;
 
-     z1 = x;
-     z2 = x / 7.Q;
-     z2 /= z1;
+      z1 = x;
+      z2 = x / 7.Q;
+      z2 /= z1;
 
-     return (__float128) z2;
+      return (__float128) z2;
     }
 
-    __float128 bar (__float128 x)
+    __float128 test_fabs (__float128 x)
     {
+      return __builtin_fabsq (x);
+    }
+
+    __float128 test_copysign (__float128 x, __float128 y)
+    {
+      return __builtin_copysignq (x, y);
+    }
+
+    __float128 test_huge_val (__float128 x)
+    {
       return x * __builtin_huge_valq ();
     }
 
+    __float128 test_inf (__float128 x)
+    {
+      return x * __builtin_infq ();
+    }
+
+    __float128 test_nan (__float128 x)
+    {
+      return x * __builtin_nanq ("");
+    }
+
 int
 main ()
 {
 
-    foo (1.2Q);
-    bar (1.2Q);
+    test_cplx (1.2Q);
+    test_fabs (1.2Q);
+    test_copysign (1.2Q, -1.2Q);
+    test_huge_val (1.2Q);
+    test_inf (1.2Q);
+    test_nan (1.2Q);
 
   ;
   return 0;
Index: configure.ac
===================================================================
--- configure.ac	(revision 237340)
+++ configure.ac	(working copy)
@@ -212,25 +212,49 @@  AC_CACHE_CHECK([whether __float128 is supported],
   [GCC_TRY_COMPILE_OR_LINK([
     typedef _Complex float __attribute__((mode(TC))) __complex128;
 
-    __float128 foo (__float128 x)
+    __float128 test_cplx (__float128 x)
     {
 
-     __complex128 z1, z2;
+      __complex128 z1, z2;
 
-     z1 = x;
-     z2 = x / 7.Q;
-     z2 /= z1;
+      z1 = x;
+      z2 = x / 7.Q;
+      z2 /= z1;
 
-     return (__float128) z2;
+      return (__float128) z2;
     }
 
-    __float128 bar (__float128 x)
+    __float128 test_fabs (__float128 x)
     {
+      return __builtin_fabsq (x);
+    }
+
+    __float128 test_copysign (__float128 x, __float128 y)
+    {
+      return __builtin_copysignq (x, y);
+    }
+
+    __float128 test_huge_val (__float128 x)
+    {
       return x * __builtin_huge_valq ();
     }
+
+    __float128 test_inf (__float128 x)
+    {
+      return x * __builtin_infq ();
+    }
+
+    __float128 test_nan (__float128 x)
+    {
+      return x * __builtin_nanq ("");
+    }
   ],[
-    foo (1.2Q);
-    bar (1.2Q);
+    test_cplx (1.2Q);
+    test_fabs (1.2Q);
+    test_copysign (1.2Q, -1.2Q);
+    test_huge_val (1.2Q);
+    test_inf (1.2Q);
+    test_nan (1.2Q);
   ],[
     libquad_cv_have_float128=yes
   ],[
Index: math/atan2q.c
===================================================================
--- math/atan2q.c	(revision 237340)
+++ math/atan2q.c	(working copy)
@@ -104,7 +104,7 @@  atan2q (__float128 y, __float128 x)
 	k = (iy-ix)>>48;
 	if(k > 120) z=pi_o_2+0.5Q*pi_lo; 	/* |y/x| >  2**120 */
 	else if(hx<0&&k<-120) z=0.0Q; 		/* |y|/x < -2**120 */
-	else z=atanq(fabsq(y/x));		/* safe to do y/x */
+	else z=atanq(__builtin_fabsq(y/x));		/* safe to do y/x */
 	switch (m) {
 	    case 0: return       z  ;	/* atan(+,+) */
 	    case 1: {
Index: math/cacoshq.c
===================================================================
--- math/cacoshq.c	(revision 237340)
+++ math/cacoshq.c	(working copy)
@@ -35,9 +35,9 @@  cacoshq (__complex128 x)
 	  __real__ res = HUGE_VALQ;
 
 	  if (rcls == QUADFP_NAN)
-	    __imag__ res = nanq ("");
+	    __imag__ res = __builtin_nanq ("");
 	  else
-	    __imag__ res = copysignq ((rcls == QUADFP_INFINITE
+	    __imag__ res = __builtin_copysignq ((rcls == QUADFP_INFINITE
 				       ? (__real__ x < 0.0
 					  ? M_PIq - M_PI_4q : M_PI_4q)
 				       : M_PI_2q), __imag__ x);
@@ -47,24 +47,24 @@  cacoshq (__complex128 x)
 	  __real__ res = HUGE_VALQ;
 
 	  if (icls >= QUADFP_ZERO)
-	    __imag__ res = copysignq (signbitq (__real__ x) ? M_PIq : 0.0,
+	    __imag__ res = __builtin_copysignq (signbitq (__real__ x) ? M_PIq : 0.0,
 				      __imag__ x);
 	  else
-	    __imag__ res = nanq ("");
+	    __imag__ res = __builtin_nanq ("");
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 	}
     }
   else if (rcls == QUADFP_ZERO && icls == QUADFP_ZERO)
     {
       __real__ res = 0.0;
-      __imag__ res = copysignq (M_PI_2q, __imag__ x);
+      __imag__ res = __builtin_copysignq (M_PI_2q, __imag__ x);
     }
   /* The factor 16 is just a guess.  */
-  else if (16.0Q * fabsq (__imag__ x) < fabsq (__real__ x))
+  else if (16.0Q * __builtin_fabsq (__imag__ x) < __builtin_fabsq (__real__ x))
     {
       /* Kahan's formula which avoid cancellation through subtraction in
 	 some cases.  */
Index: math/casinhq.c
===================================================================
--- math/casinhq.c	(revision 237340)
+++ math/casinhq.c	(working copy)
@@ -32,12 +32,12 @@  casinhq (__complex128 x)
     {
       if (icls == QUADFP_INFINITE)
 	{
-	  __real__ res = copysignq (HUGE_VALQ, __real__ x);
+	  __real__ res = __builtin_copysignq (HUGE_VALQ, __real__ x);
 
 	  if (rcls == QUADFP_NAN)
-	    __imag__ res = nanq ("");
+	    __imag__ res = __builtin_nanq ("");
 	  else
-	    __imag__ res = copysignq (rcls >= QUADFP_ZERO ? M_PI_2q : M_PI_4q,
+	    __imag__ res = __builtin_copysignq (rcls >= QUADFP_ZERO ? M_PI_2q : M_PI_4q,
 				      __imag__ x);
 	}
       else if (rcls <= QUADFP_INFINITE)
@@ -45,14 +45,14 @@  casinhq (__complex128 x)
 	  __real__ res = __real__ x;
 	  if ((rcls == QUADFP_INFINITE && icls >= QUADFP_ZERO)
 	      || (rcls == QUADFP_NAN && icls == QUADFP_ZERO))
-	    __imag__ res = copysignq (0.0, __imag__ x);
+	    __imag__ res = __builtin_copysignq (0.0, __imag__ x);
 	  else
-	    __imag__ res = nanq ("");
+	    __imag__ res = __builtin_nanq ("");
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 	}
     }
   else if (rcls == QUADFP_ZERO && icls == QUADFP_ZERO)
@@ -75,8 +75,8 @@  casinhq (__complex128 x)
 
       /* Ensure zeros have correct sign and results are correct if
 	 very close to branch cuts.  */
-      __real__ res = copysignq (__real__ res, __real__ x);
-      __imag__ res = copysignq (__imag__ res, __imag__ x);
+      __real__ res = __builtin_copysignq (__real__ res, __real__ x);
+      __imag__ res = __builtin_copysignq (__imag__ res, __imag__ x);
     }
 
   return res;
Index: math/casinq.c
===================================================================
--- math/casinq.c	(revision 237340)
+++ math/casinq.c	(working copy)
@@ -34,13 +34,13 @@  casinq (__complex128 x)
 	}
       else if (isinfq (__real__ x) || isinfq (__imag__ x))
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = copysignq (HUGE_VALQ, __imag__ x);
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_copysignq (HUGE_VALQ, __imag__ x);
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 	}
     }
   else
Index: math/catanhq.c
===================================================================
--- math/catanhq.c	(revision 237340)
+++ math/catanhq.c	(working copy)
@@ -32,21 +32,21 @@  catanhq (__complex128 x)
     {
       if (icls == QUADFP_INFINITE)
 	{
-	  __real__ res = copysignq (0.0, __real__ x);
-	  __imag__ res = copysignq (M_PI_2q, __imag__ x);
+	  __real__ res = __builtin_copysignq (0.0, __real__ x);
+	  __imag__ res = __builtin_copysignq (M_PI_2q, __imag__ x);
 	}
       else if (rcls == QUADFP_INFINITE || rcls == QUADFP_ZERO)
 	{
-	  __real__ res = copysignq (0.0, __real__ x);
+	  __real__ res = __builtin_copysignq (0.0, __real__ x);
 	  if (icls >= QUADFP_ZERO)
-	    __imag__ res = copysignq (M_PI_2q, __imag__ x);
+	    __imag__ res = __builtin_copysignq (M_PI_2q, __imag__ x);
 	  else
-	    __imag__ res = nanq ("");
+	    __imag__ res = __builtin_nanq ("");
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 	}
     }
   else if (rcls == QUADFP_ZERO && icls == QUADFP_ZERO)
Index: math/catanq.c
===================================================================
--- math/catanq.c	(revision 237340)
+++ math/catanq.c	(working copy)
@@ -32,26 +32,26 @@  catanq (__complex128 x)
     {
       if (rcls == QUADFP_INFINITE)
 	{
-	  __real__ res = copysignq (M_PI_2q, __real__ x);
-	  __imag__ res = copysignq (0.0, __imag__ x);
+	  __real__ res = __builtin_copysignq (M_PI_2q, __real__ x);
+	  __imag__ res = __builtin_copysignq (0.0, __imag__ x);
 	}
       else if (icls == QUADFP_INFINITE)
 	{
 	  if (rcls >= QUADFP_ZERO)
-	    __real__ res = copysignq (M_PI_2q, __real__ x);
+	    __real__ res = __builtin_copysignq (M_PI_2q, __real__ x);
 	  else
-	    __real__ res = nanq ("");
-	  __imag__ res = copysignq (0.0, __imag__ x);
+	    __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_copysignq (0.0, __imag__ x);
 	}
       else if (icls == QUADFP_ZERO || icls == QUADFP_INFINITE)
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = copysignq (0.0, __imag__ x);
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_copysignq (0.0, __imag__ x);
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 	}
     }
   else if (rcls == QUADFP_ZERO && icls == QUADFP_ZERO)
Index: math/ccoshq.c
===================================================================
--- math/ccoshq.c	(revision 237340)
+++ math/ccoshq.c	(working copy)
@@ -50,10 +50,10 @@  ccoshq (__complex128 x)
 	      cosix = 1.0Q;
 	    }
 
-	  if (fabsq (__real__ x) > t)
+	  if (__builtin_fabsq (__real__ x) > t)
 	    {
 	      __float128 exp_t = expq (t);
-	      __float128 rx = fabsq (__real__ x);
+	      __float128 rx = __builtin_fabsq (__real__ x);
 	      if (signbitq (__real__ x))
 		sinix = -sinix;
 	      rx -= t;
@@ -86,8 +86,8 @@  ccoshq (__complex128 x)
 	}
       else
 	{
-	  __imag__ retval = __real__ x == 0.0Q ? 0.0Q : nanq ("");
-	  __real__ retval = nanq ("") + nanq ("");
+	  __imag__ retval = __real__ x == 0.0Q ? 0.0Q : __builtin_nanq ("");
+	  __real__ retval = __builtin_nanq ("") + __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  if (icls == QUADFP_INFINITE)
@@ -113,21 +113,21 @@  ccoshq (__complex128 x)
 	      cosix = 1.0Q;
 	    }
 
-	  __real__ retval = copysignq (HUGE_VALQ, cosix);
-	  __imag__ retval = (copysignq (HUGE_VALQ, sinix)
-			     * copysignq (1.0Q, __real__ x));
+	  __real__ retval = __builtin_copysignq (HUGE_VALQ, cosix);
+	  __imag__ retval = (__builtin_copysignq (HUGE_VALQ, sinix)
+			     * __builtin_copysignq (1.0Q, __real__ x));
 	}
       else if (icls == QUADFP_ZERO)
 	{
 	  /* Imaginary part is 0.0.  */
 	  __real__ retval = HUGE_VALQ;
-	  __imag__ retval = __imag__ x * copysignq (1.0Q, __real__ x);
+	  __imag__ retval = __imag__ x * __builtin_copysignq (1.0Q, __real__ x);
 	}
       else
 	{
 	  /* The addition raises the invalid exception.  */
 	  __real__ retval = HUGE_VALQ;
-	  __imag__ retval = nanq ("") + nanq ("");
+	  __imag__ retval = __builtin_nanq ("") + __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  if (icls == QUADFP_INFINITE)
@@ -137,8 +137,8 @@  ccoshq (__complex128 x)
     }
   else
     {
-      __real__ retval = nanq ("");
-      __imag__ retval = __imag__ x == 0.0 ? __imag__ x : nanq ("");
+      __real__ retval = __builtin_nanq ("");
+      __imag__ retval = __imag__ x == 0.0 ? __imag__ x : __builtin_nanq ("");
     }
 
   return retval;
Index: math/cexpq.c
===================================================================
--- math/cexpq.c	(revision 237340)
+++ math/cexpq.c	(working copy)
@@ -80,8 +80,8 @@  cexpq (__complex128 x)
 	{
 	  /* If the imaginary part is +-inf or NaN and the real part
 	     is not +-inf the result is NaN + iNaN.  */
-	  __real__ retval = nanq ("");
-	  __imag__ retval = nanq ("");
+	  __real__ retval = __builtin_nanq ("");
+	  __imag__ retval = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  feraiseexcept (FE_INVALID);
@@ -116,14 +116,14 @@  cexpq (__complex128 x)
 		  cosix = 1.0Q;
 		}
 
-	      __real__ retval = copysignq (value, cosix);
-	      __imag__ retval = copysignq (value, sinix);
+	      __real__ retval = __builtin_copysignq (value, cosix);
+	      __imag__ retval = __builtin_copysignq (value, sinix);
 	    }
 	}
       else if (signbitq (__real__ x) == 0)
 	{
 	  __real__ retval = HUGE_VALQ;
-	  __imag__ retval = nanq ("");
+	  __imag__ retval = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  if (icls == QUADFP_INFINITE)
@@ -133,14 +133,14 @@  cexpq (__complex128 x)
       else
 	{
 	  __real__ retval = 0.0Q;
-	  __imag__ retval = copysignq (0.0Q, __imag__ x);
+	  __imag__ retval = __builtin_copysignq (0.0Q, __imag__ x);
 	}
     }
   else
     {
       /* If the real part is NaN the result is NaN + iNaN.  */
-      __real__ retval = nanq ("");
-      __imag__ retval = nanq ("");
+      __real__ retval = __builtin_nanq ("");
+      __imag__ retval = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
       if (rcls != QUADFP_NAN || icls != QUADFP_NAN)
Index: math/clog10q.c
===================================================================
--- math/clog10q.c	(revision 237340)
+++ math/clog10q.c	(working copy)
@@ -35,14 +35,14 @@  clog10q (__complex128 x)
     {
       /* Real and imaginary part are 0.0.  */
       __imag__ result = signbitq (__real__ x) ? M_PIq : 0.0Q;
-      __imag__ result = copysignq (__imag__ result, __imag__ x);
+      __imag__ result = __builtin_copysignq (__imag__ result, __imag__ x);
       /* Yes, the following line raises an exception.  */
-      __real__ result = -1.0Q / fabsq (__real__ x);
+      __real__ result = -1.0Q / __builtin_fabsq (__real__ x);
     }
   else if (__builtin_expect (rcls != QUADFP_NAN && icls != QUADFP_NAN, 1))
     {
       /* Neither real nor imaginary part is NaN.  */
-      __float128 absx = fabsq (__real__ x), absy = fabsq (__imag__ x);
+      __float128 absx = __builtin_fabsq (__real__ x), absy = __builtin_fabsq (__imag__ x);
       int scale = 0;
 
       if (absx < absy)
@@ -104,12 +104,12 @@  clog10q (__complex128 x)
     }
   else
     {
-      __imag__ result = nanq ("");
+      __imag__ result = __builtin_nanq ("");
       if (rcls == QUADFP_INFINITE || icls == QUADFP_INFINITE)
 	/* Real or imaginary part is infinite.  */
 	__real__ result = HUGE_VALQ;
       else
-	__real__ result = nanq ("");
+	__real__ result = __builtin_nanq ("");
     }
 
   return result;
Index: math/clogq.c
===================================================================
--- math/clogq.c	(revision 237340)
+++ math/clogq.c	(working copy)
@@ -31,14 +31,14 @@  clogq (__complex128 x)
     {
       /* Real and imaginary part are 0.0.  */
       __imag__ result = signbitq (__real__ x) ? M_PIq : 0.0Q;
-      __imag__ result = copysignq (__imag__ result, __imag__ x);
+      __imag__ result = __builtin_copysignq (__imag__ result, __imag__ x);
       /* Yes, the following line raises an exception.  */
-      __real__ result = -1.0Q / fabsq (__real__ x);
+      __real__ result = -1.0Q / __builtin_fabsq (__real__ x);
     }
   else if (__builtin_expect (rcls != QUADFP_NAN && icls != QUADFP_NAN, 1))
     {
       /* Neither real nor imaginary part is NaN.  */
-      __float128 absx = fabsq (__real__ x), absy = fabsq (__imag__ x);
+      __float128 absx = __builtin_fabsq (__real__ x), absy = __builtin_fabsq (__imag__ x);
       int scale = 0;
 
       if (absx < absy)
@@ -99,12 +99,12 @@  clogq (__complex128 x)
     }
   else
     {
-      __imag__ result = nanq ("");
+      __imag__ result = __builtin_nanq ("");
       if (rcls == QUADFP_INFINITE || icls == QUADFP_INFINITE)
 	/* Real or imaginary part is infinite.  */
 	__real__ result = HUGE_VALQ;
       else
-	__real__ result = nanq ("");
+	__real__ result = __builtin_nanq ("");
     }
 
   return result;
Index: math/cprojq.c
===================================================================
--- math/cprojq.c	(revision 237340)
+++ math/cprojq.c	(working copy)
@@ -30,8 +30,8 @@  cprojq (__complex128 x)
     {
       __complex128 res;
 
-      __real__ res = __builtin_inf ();
-      __imag__ res = copysignq (0.0, __imag__ x);
+      __real__ res = __builtin_infq ();
+      __imag__ res = __builtin_copysignq (0.0, __imag__ x);
 
       return res;
     }
Index: math/csinhq.c
===================================================================
--- math/csinhq.c	(revision 237340)
+++ math/csinhq.c	(working copy)
@@ -32,7 +32,7 @@  csinhq (__complex128 x)
   int rcls = fpclassifyq (__real__ x);
   int icls = fpclassifyq (__imag__ x);
 
-  __real__ x = fabsq (__real__ x);
+  __real__ x = __builtin_fabsq (__real__ x);
 
   if (__builtin_expect (rcls >= QUADFP_ZERO, 1))
     {
@@ -53,10 +53,10 @@  csinhq (__complex128 x)
 	      cosix = 1.0Q;
 	    }
 
-	  if (fabsq (__real__ x) > t)
+	  if (__builtin_fabsq (__real__ x) > t)
 	    {
 	      __float128 exp_t = expq (t);
-	      __float128 rx = fabsq (__real__ x);
+	      __float128 rx = __builtin_fabsq (__real__ x);
 	      if (signbitq (__real__ x))
 		cosix = -cosix;
 	      rx -= t;
@@ -95,8 +95,8 @@  csinhq (__complex128 x)
 	  if (rcls == QUADFP_ZERO)
 	    {
 	      /* Real part is 0.0.  */
-	      __real__ retval = copysignq (0.0Q, negate ? -1.0Q : 1.0Q);
-	      __imag__ retval = nanq ("") + nanq ("");
+	      __real__ retval = __builtin_copysignq (0.0Q, negate ? -1.0Q : 1.0Q);
+	      __imag__ retval = __builtin_nanq ("") + __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	      if (icls == QUADFP_INFINITE)
@@ -105,8 +105,8 @@  csinhq (__complex128 x)
 	    }
 	  else
 	    {
-	      __real__ retval = nanq ("");
-	      __imag__ retval = nanq ("");
+	      __real__ retval = __builtin_nanq ("");
+	      __imag__ retval = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	      feraiseexcept (FE_INVALID);
@@ -132,8 +132,8 @@  csinhq (__complex128 x)
 	      cosix = 1.0;
 	    }
 
-	  __real__ retval = copysignq (HUGE_VALQ, cosix);
-	  __imag__ retval = copysignq (HUGE_VALQ, sinix);
+	  __real__ retval = __builtin_copysignq (HUGE_VALQ, cosix);
+	  __imag__ retval = __builtin_copysignq (HUGE_VALQ, sinix);
 
 	  if (negate)
 	    __real__ retval = -__real__ retval;
@@ -148,7 +148,7 @@  csinhq (__complex128 x)
 	{
 	  /* The addition raises the invalid exception.  */
 	  __real__ retval = HUGE_VALQ;
-	  __imag__ retval = nanq ("") + nanq ("");
+	  __imag__ retval = __builtin_nanq ("") + __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  if (icls == QUADFP_INFINITE)
@@ -158,8 +158,8 @@  csinhq (__complex128 x)
     }
   else
     {
-      __real__ retval = nanq ("");
-      __imag__ retval = __imag__ x == 0.0Q ? __imag__ x : nanq ("");
+      __real__ retval = __builtin_nanq ("");
+      __imag__ retval = __imag__ x == 0.0Q ? __imag__ x : __builtin_nanq ("");
     }
 
   return retval;
Index: math/csinq.c
===================================================================
--- math/csinq.c	(revision 237340)
+++ math/csinq.c	(working copy)
@@ -32,7 +32,7 @@  csinq (__complex128 x)
   int rcls = fpclassifyq (__real__ x);
   int icls = fpclassifyq (__imag__ x);
 
-  __real__ x = fabsq (__real__ x);
+  __real__ x = __builtin_fabsq (__real__ x);
 
   if (__builtin_expect (icls >= QUADFP_ZERO, 1))
     {
@@ -53,10 +53,10 @@  csinq (__complex128 x)
 	      cosix = 1.0Q;
 	    }
 
-	  if (fabsq (__imag__ x) > t)
+	  if (__builtin_fabsq (__imag__ x) > t)
 	    {
 	      __float128 exp_t = expq (t);
-	      __float128 ix = fabsq (__imag__ x);
+	      __float128 ix = __builtin_fabsq (__imag__ x);
 	      if (signbitq (__imag__ x))
 		cosix = -cosix;
 	      ix -= t;
@@ -95,7 +95,7 @@  csinq (__complex128 x)
 	  if (icls == QUADFP_ZERO)
 	    {
 	      /* Imaginary part is 0.0.  */
-	      __real__ retval = nanq ("");
+	      __real__ retval = __builtin_nanq ("");
 	      __imag__ retval = __imag__ x;
 
 #ifdef HAVE_FENV_H
@@ -105,8 +105,8 @@  csinq (__complex128 x)
 	    }
 	  else
 	    {
-	      __real__ retval = nanq ("");
-	      __imag__ retval = nanq ("");
+	      __real__ retval = __builtin_nanq ("");
+	      __imag__ retval = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	      feraiseexcept (FE_INVALID);
@@ -120,7 +120,7 @@  csinq (__complex128 x)
       if (rcls == QUADFP_ZERO)
 	{
 	  /* Real part is 0.0.  */
-	  __real__ retval = copysignq (0.0Q, negate ? -1.0Q : 1.0Q);
+	  __real__ retval = __builtin_copysignq (0.0Q, negate ? -1.0Q : 1.0Q);
 	  __imag__ retval = __imag__ x;
 	}
       else if (rcls > QUADFP_ZERO)
@@ -138,8 +138,8 @@  csinq (__complex128 x)
 	      cosix = 1.0;
 	    }
 
-	  __real__ retval = copysignq (HUGE_VALQ, sinix);
-	  __imag__ retval = copysignq (HUGE_VALQ, cosix);
+	  __real__ retval = __builtin_copysignq (HUGE_VALQ, sinix);
+	  __imag__ retval = __builtin_copysignq (HUGE_VALQ, cosix);
 
 	  if (negate)
 	    __real__ retval = -__real__ retval;
@@ -149,7 +149,7 @@  csinq (__complex128 x)
       else
 	{
 	  /* The addition raises the invalid exception.  */
-	  __real__ retval = nanq ("");
+	  __real__ retval = __builtin_nanq ("");
 	  __imag__ retval = HUGE_VALQ;
 
 #ifdef HAVE_FENV_H
@@ -161,10 +161,10 @@  csinq (__complex128 x)
   else
     {
       if (rcls == QUADFP_ZERO)
-	__real__ retval = copysignq (0.0Q, negate ? -1.0Q : 1.0Q);
+	__real__ retval = __builtin_copysignq (0.0Q, negate ? -1.0Q : 1.0Q);
       else
-	__real__ retval = nanq ("");
-      __imag__ retval = nanq ("");
+	__real__ retval = __builtin_nanq ("");
+      __imag__ retval = __builtin_nanq ("");
     }
 
   return retval;
Index: math/csqrtq.c
===================================================================
--- math/csqrtq.c	(revision 237340)
+++ math/csqrtq.c	(working copy)
@@ -43,20 +43,20 @@  csqrtq (__complex128 x)
 	{
 	  if (__real__ x < 0.0Q)
 	    {
-	      __real__ res = icls == QUADFP_NAN ? nanq ("") : 0;
-	      __imag__ res = copysignq (HUGE_VALQ, __imag__ x);
+	      __real__ res = icls == QUADFP_NAN ? __builtin_nanq ("") : 0;
+	      __imag__ res = __builtin_copysignq (HUGE_VALQ, __imag__ x);
 	    }
 	  else
 	    {
 	      __real__ res = __real__ x;
 	      __imag__ res = (icls == QUADFP_NAN
-			      ? nanq ("") : copysignq (0.0Q, __imag__ x));
+			      ? __builtin_nanq ("") : __builtin_copysignq (0.0Q, __imag__ x));
 	    }
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 	}
     }
   else
@@ -66,25 +66,25 @@  csqrtq (__complex128 x)
 	  if (__real__ x < 0.0Q)
 	    {
 	      __real__ res = 0.0Q;
-	      __imag__ res = copysignq (sqrtq (-__real__ x),
+	      __imag__ res = __builtin_copysignq (sqrtq (-__real__ x),
 					__imag__ x);
 	    }
 	  else
 	    {
-	      __real__ res = fabsq (sqrtq (__real__ x));
-	      __imag__ res = copysignq (0.0Q, __imag__ x);
+	      __real__ res = __builtin_fabsq (sqrtq (__real__ x));
+	      __imag__ res = __builtin_copysignq (0.0Q, __imag__ x);
 	    }
 	}
       else if (__builtin_expect (rcls == QUADFP_ZERO, 0))
 	{
 	  __float128 r;
-	  if (fabsq (__imag__ x) >= 2.0Q * FLT128_MIN)
-	    r = sqrtq (0.5Q * fabsq (__imag__ x));
+	  if (__builtin_fabsq (__imag__ x) >= 2.0Q * FLT128_MIN)
+	    r = sqrtq (0.5Q * __builtin_fabsq (__imag__ x));
 	  else
-	    r = 0.5Q * sqrtq (2.0Q * fabsq (__imag__ x));
+	    r = 0.5Q * sqrtq (2.0Q * __builtin_fabsq (__imag__ x));
 
 	  __real__ res = r;
-	  __imag__ res = copysignq (r, __imag__ x);
+	  __imag__ res = __builtin_copysignq (r, __imag__ x);
 	}
       else
 	{
@@ -91,23 +91,23 @@  csqrtq (__complex128 x)
 	  __float128 d, r, s;
 	  int scale = 0;
 
-	  if (fabsq (__real__ x) > FLT128_MAX / 4.0Q)
+	  if (__builtin_fabsq (__real__ x) > FLT128_MAX / 4.0Q)
 	    {
 	      scale = 1;
 	      __real__ x = scalbnq (__real__ x, -2 * scale);
 	      __imag__ x = scalbnq (__imag__ x, -2 * scale);
 	    }
-	  else if (fabsq (__imag__ x) > FLT128_MAX / 4.0Q)
+	  else if (__builtin_fabsq (__imag__ x) > FLT128_MAX / 4.0Q)
 	    {
 	      scale = 1;
-	      if (fabsq (__real__ x) >= 4.0Q * FLT128_MIN)
+	      if (__builtin_fabsq (__real__ x) >= 4.0Q * FLT128_MIN)
 		__real__ x = scalbnq (__real__ x, -2 * scale);
 	      else
 		__real__ x = 0.0Q;
 	      __imag__ x = scalbnq (__imag__ x, -2 * scale);
 	    }
-	  else if (fabsq (__real__ x) < FLT128_MIN
-		   && fabsq (__imag__ x) < FLT128_MIN)
+	  else if (__builtin_fabsq (__real__ x) < FLT128_MIN
+		   && __builtin_fabsq (__imag__ x) < FLT128_MIN)
 	    {
 	      scale = -(FLT128_MANT_DIG / 2);
 	      __real__ x = scalbnq (__real__ x, -2 * scale);
@@ -125,7 +125,7 @@  csqrtq (__complex128 x)
 	  else
 	    {
 	      s = sqrtq (0.5Q * (d - __real__ x));
-	      r = fabsq (0.5Q * (__imag__ x / s));
+	      r = __builtin_fabsq (0.5Q * (__imag__ x / s));
 	    }
 
 	  if (scale)
@@ -135,7 +135,7 @@  csqrtq (__complex128 x)
 	    }
 
 	  __real__ res = r;
-	  __imag__ res = copysignq (s, __imag__ x);
+	  __imag__ res = __builtin_copysignq (s, __imag__ x);
 	}
     }
 
Index: math/ctanhq.c
===================================================================
--- math/ctanhq.c	(revision 237340)
+++ math/ctanhq.c	(working copy)
@@ -33,8 +33,8 @@  ctanhq (__complex128 x)
     {
       if (__quadmath_isinf_nsq (__real__ x))
 	{
-	  __real__ res = copysignq (1.0Q, __real__ x);
-	  __imag__ res = copysignq (0.0Q, __imag__ x);
+	  __real__ res = __builtin_copysignq (1.0Q, __real__ x);
+	  __imag__ res = __builtin_copysignq (0.0Q, __imag__ x);
 	}
       else if (__imag__ x == 0.0Q)
 	{
@@ -42,8 +42,8 @@  ctanhq (__complex128 x)
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  if (__quadmath_isinf_nsq (__imag__ x))
@@ -71,7 +71,7 @@  ctanhq (__complex128 x)
 	  cosix = 1.0Q;
 	}
 
-      if (fabsq (__real__ x) > t)
+      if (__builtin_fabsq (__real__ x) > t)
 	{
 	  /* Avoid intermediate overflow when the imaginary part of
 	     the result may be subnormal.  Ignoring negligible terms,
@@ -79,9 +79,9 @@  ctanhq (__complex128 x)
 	     sin(y)*cos(y)/sinh(x)^2 = 4*sin(y)*cos(y)/exp(2x).  */
 	  __float128 exp_2t = expq (2 * t);
 
-	  __real__ res = copysignq (1.0, __real__ x);
+	  __real__ res = __builtin_copysignq (1.0, __real__ x);
 	  __imag__ res = 4 * sinix * cosix;
-	  __real__ x = fabsq (__real__ x);
+	  __real__ x = __builtin_fabsq (__real__ x);
 	  __real__ x -= t;
 	  __imag__ res /= exp_2t;
 	  if (__real__ x > t)
@@ -96,7 +96,7 @@  ctanhq (__complex128 x)
       else
 	{
 	  __float128 sinhrx, coshrx;
-	  if (fabsq (__real__ x) > FLT128_MIN)
+	  if (__builtin_fabsq (__real__ x) > FLT128_MIN)
 	    {
 	      sinhrx = sinhq (__real__ x);
 	      coshrx = coshq (__real__ x);
@@ -107,7 +107,7 @@  ctanhq (__complex128 x)
 	      coshrx = 1.0Q;
 	    }
 
-	  if (fabsq (sinhrx) > fabsq (cosix) * FLT128_EPSILON)
+	  if (__builtin_fabsq (sinhrx) > __builtin_fabsq (cosix) * FLT128_EPSILON)
 	    den = sinhrx * sinhrx + cosix * cosix;
 	  else
 	    den = cosix * cosix;
Index: math/ctanq.c
===================================================================
--- math/ctanq.c	(revision 237340)
+++ math/ctanq.c	(working copy)
@@ -33,8 +33,8 @@  ctanq (__complex128 x)
     {
       if (__quadmath_isinf_nsq (__imag__ x))
 	{
-	  __real__ res = copysignq (0.0Q, __real__ x);
-	  __imag__ res = copysignq (1.0Q, __imag__ x);
+	  __real__ res = __builtin_copysignq (0.0Q, __real__ x);
+	  __imag__ res = __builtin_copysignq (1.0Q, __imag__ x);
 	}
       else if (__real__ x == 0.0Q)
 	{
@@ -42,8 +42,8 @@  ctanq (__complex128 x)
 	}
       else
 	{
-	  __real__ res = nanq ("");
-	  __imag__ res = nanq ("");
+	  __real__ res = __builtin_nanq ("");
+	  __imag__ res = __builtin_nanq ("");
 
 #ifdef HAVE_FENV_H
 	  if (__quadmath_isinf_nsq (__real__ x))
@@ -71,7 +71,7 @@  ctanq (__complex128 x)
 	  cosrx = 1.0Q;
 	}
 
-      if (fabsq (__imag__ x) > t)
+      if (__builtin_fabsq (__imag__ x) > t)
 	{
 	  /* Avoid intermediate overflow when the real part of the
 	     result may be subnormal.  Ignoring negligible terms, the
@@ -79,9 +79,9 @@  ctanq (__complex128 x)
 	     sin(x)*cos(x)/sinh(y)^2 = 4*sin(x)*cos(x)/exp(2y).  */
 	  __float128 exp_2t = expq (2 * t);
 
-	  __imag__ res = copysignq (1.0Q, __imag__ x);
+	  __imag__ res = __builtin_copysignq (1.0Q, __imag__ x);
 	  __real__ res = 4 * sinrx * cosrx;
-	  __imag__ x = fabsq (__imag__ x);
+	  __imag__ x = __builtin_fabsq (__imag__ x);
 	  __imag__ x -= t;
 	  __real__ res /= exp_2t;
 	  if (__imag__ x > t)
@@ -96,7 +96,7 @@  ctanq (__complex128 x)
       else
 	{
 	  __float128 sinhix, coshix;
-	  if (fabsq (__imag__ x) > FLT128_MIN)
+	  if (__builtin_fabsq (__imag__ x) > FLT128_MIN)
 	    {
 	      sinhix = sinhq (__imag__ x);
 	      coshix = coshq (__imag__ x);
@@ -107,7 +107,7 @@  ctanq (__complex128 x)
 	      coshix = 1.0Q;
 	    }
 
-	  if (fabsq (sinhix) > fabsq (cosrx) * FLT128_EPSILON)
+	  if (__builtin_fabsq (sinhix) > __builtin_fabsq (cosrx) * FLT128_EPSILON)
 	    den = cosrx * cosrx + sinhix * sinhix;
 	  else
 	    den = cosrx * cosrx;
Index: math/erfq.c
===================================================================
--- math/erfq.c	(revision 237340)
+++ math/erfq.c	(working copy)
@@ -884,7 +884,7 @@  erfcq (__float128 x)
       if ((ix >= 0x40022000) && (sign & 0x80000000))
 	return two - tiny;
 
-      x = fabsq (x);
+      x = __builtin_fabsq (x);
       z = one / (x * x);
       i = 8.0 / x;
       switch (i)
Index: math/j0q.c
===================================================================
--- math/j0q.c	(revision 237340)
+++ math/j0q.c	(working copy)
@@ -688,7 +688,7 @@  j0q (__float128 x)
   if (x == 0.0Q)
     return 1.0Q;
 
-  xx = fabsq (x);
+  xx = __builtin_fabsq (x);
   if (xx <= 2.0Q)
     {
       /* 0 <= x <= 2 */
@@ -829,7 +829,7 @@  y0q (__float128 x)
 	return (zero / (zero * x));
       return -HUGE_VALQ + x;
     }
-  xx = fabsq (x);
+  xx = __builtin_fabsq (x);
   if (xx <= 0x1p-57)
     return U0 + TWOOPI * logq (x);
   if (xx <= 2.0Q)
Index: math/j1q.c
===================================================================
--- math/j1q.c	(revision 237340)
+++ math/j1q.c	(working copy)
@@ -693,7 +693,7 @@  j1q (__float128 x)
     }
   if (x == 0.0Q)
     return x;
-  xx = fabsq (x);
+  xx = __builtin_fabsq (x);
   if (xx <= 2.0Q)
     {
       /* 0 <= x <= 2 */
@@ -835,7 +835,7 @@  y1q (__float128 x)
 	return (zero / (zero * x));
       return -HUGE_VALQ + x;
     }
-  xx = fabsq (x);
+  xx = __builtin_fabsq (x);
   if (xx <= 0x1p-114)
     return -TWOOPI / x;
   if (xx <= 2.0Q)
Index: math/jnq.c
===================================================================
--- math/jnq.c	(revision 237340)
+++ math/jnq.c	(working copy)
@@ -102,7 +102,7 @@  jnq (int n, __float128 x)
   if (n == 1)
     return (j1q (x));
   sgn = (n & 1) & (se >> 31);	/* even n -- 0, odd n -- sign(x) */
-  x = fabsq (x);
+  x = __builtin_fabsq (x);
 
   if (x == 0.0Q || ix >= 0x7fff0000)	/* if x is 0 or inf */
     b = zero;
@@ -243,7 +243,7 @@  jnq (int n, __float128 x)
 	   */
 	  tmp = n;
 	  v = two / x;
-	  tmp = tmp * logq (fabsq (v * tmp));
+	  tmp = tmp * logq (__builtin_fabsq (v * tmp));
 
 	  if (tmp < 1.1356523406294143949491931077970765006170e+04Q)
 	    {
@@ -280,7 +280,7 @@  jnq (int n, __float128 x)
 	   */
 	  z = j0q (x);
 	  w = j1q (x);
-	  if (fabsq (z) >= fabsq (w))
+	  if (__builtin_fabsq (z) >= __builtin_fabsq (w))
 	    b = (t * z / b);
 	  else
 	    b = (t * w / a);
Index: math/logbq.c
===================================================================
--- math/logbq.c	(revision 237340)
+++ math/logbq.c	(working copy)
@@ -29,7 +29,7 @@  logbq (__float128 x)
   GET_FLT128_WORDS64 (hx, lx, x);
   hx &= 0x7fffffffffffffffLL;	/* high |x| */
   if ((hx | lx) == 0)
-    return -1.0 / fabsq (x);
+    return -1.0 / __builtin_fabsq (x);
   if (hx >= 0x7fff000000000000LL)
     return x * x;
   if ((ex = hx >> 48) == 0)	/* IEEE 754 logb */
Index: math/powq.c
===================================================================
--- math/powq.c	(revision 237340)
+++ math/powq.c	(working copy)
@@ -235,7 +235,7 @@  powq (__float128 x, __float128 y)
 	}
     }
 
-  ax = fabsq (x);
+  ax = __builtin_fabsq (x);
   /* special value of x */
   if ((p.words32.w1 | p.words32.w2 | p.words32.w3) == 0)
     {
Index: math/remainderq.c
===================================================================
--- math/remainderq.c	(revision 237340)
+++ math/remainderq.c	(working copy)
@@ -47,8 +47,8 @@  remainderq (__float128 x, __float128 p)
 
   if (hp<=0x7ffdffffffffffffLL) x = fmodq (x,p+p);	/* now x < 2p */
   if (((hx-hp)|(lx-lp))==0) return zero*x;
-  x  = fabsq(x);
-  p  = fabsq(p);
+  x  = __builtin_fabsq(x);
+  p  = __builtin_fabsq(p);
   if (hp<0x0002000000000000LL) {
       if(x+x>p) {
 	  x-=p;
Index: math/remquoq.c
===================================================================
--- math/remquoq.c	(revision 237340)
+++ math/remquoq.c	(working copy)
@@ -56,8 +56,8 @@  remquoq (__float128 x, __float128 y, int *quo)
       return zero * x;
     }
 
-  x  = fabsq (x);
-  y  = fabsq (y);
+  x  = __builtin_fabsq (x);
+  y  = __builtin_fabsq (y);
   cquo = 0;
 
   if (x >= 4 * y)
Index: math/scalblnq.c
===================================================================
--- math/scalblnq.c	(revision 237340)
+++ math/scalblnq.c	(working copy)
@@ -41,9 +41,9 @@  scalblnq (__float128 x, long int n)
 	    k = ((hx>>48)&0x7fff) - 114;
 	}
         if (k==0x7fff) return x+x;		/* NaN or Inf */
-	if (n< -50000) return tiny*copysignq(tiny,x); /*underflow*/
+	if (n< -50000) return tiny*__builtin_copysignq(tiny,x); /*underflow*/
         if (n> 50000 || k+n > 0x7ffe)
-	  return huge*copysignq(huge,x); /* overflow  */
+	  return huge*__builtin_copysignq(huge,x); /* overflow  */
 	/* Now k and n are bounded we know that k = k+n does not
 	   overflow.  */
         k = k+n;
@@ -50,7 +50,7 @@  scalblnq (__float128 x, long int n)
         if (k > 0) 				/* normal result */
 	    {SET_FLT128_MSW64(x,(hx&0x8000ffffffffffffULL)|(k<<48)); return x;}
         if (k <= -114)
-	  return tiny*copysignq(tiny,x); 	/*underflow*/
+	  return tiny*__builtin_copysignq(tiny,x); 	/*underflow*/
         k += 114;				/* subnormal result */
 	SET_FLT128_MSW64(x,(hx&0x8000ffffffffffffULL)|(k<<48));
         return x*twom114;
Index: math/scalbnq.c
===================================================================
--- math/scalbnq.c	(revision 237340)
+++ math/scalbnq.c	(working copy)
@@ -42,9 +42,9 @@  scalbnq (__float128 x, int n)
 	    k = ((hx>>48)&0x7fff) - 114;
 	}
         if (k==0x7fff) return x+x;		/* NaN or Inf */
-	if (n< -50000) return tiny*copysignq(tiny,x); /*underflow*/
+	if (n< -50000) return tiny*__builtin_copysignq(tiny,x); /*underflow*/
         if (n> 50000 || k+n > 0x7ffe)
-	  return huge*copysignq(huge,x); /* overflow  */
+	  return huge*__builtin_copysignq(huge,x); /* overflow  */
 	/* Now k and n are bounded we know that k = k+n does not
 	   overflow.  */
         k = k+n;
@@ -51,7 +51,7 @@  scalbnq (__float128 x, int n)
         if (k > 0) 				/* normal result */
 	    {SET_FLT128_MSW64(x,(hx&0x8000ffffffffffffULL)|(k<<48)); return x;}
         if (k <= -114)
-	  return tiny*copysignq(tiny,x); 	/*underflow*/
+	  return tiny*__builtin_copysignq(tiny,x); 	/*underflow*/
         k += 114;				/* subnormal result */
 	SET_FLT128_MSW64(x,(hx&0x8000ffffffffffffULL)|(k<<48));
         return x*twom114;
Index: math/sinq_kernel.c
===================================================================
--- math/sinq_kernel.c	(revision 237340)
+++ math/sinq_kernel.c	(working copy)
@@ -105,7 +105,7 @@  __quadmath_kernel_sinq (__float128 x, __float128 y
 	 sinq(h+l) = sinq(h)cosq(l) + cosq(h)sinq(l).  */
       index = 0x3ffe - (tix >> 16);
       hix = (tix + (0x200 << index)) & (0xfffffc00 << index);
-      x = fabsq (x);
+      x = __builtin_fabsq (x);
       switch (index)
 	{
 	case 0: index = ((45 << 10) + hix - 0x3ffe0000) >> 8; break;
Index: math/tanq.c
===================================================================
--- math/tanq.c	(revision 237340)
+++ math/tanq.c	(working copy)
@@ -98,7 +98,7 @@  __quadmath_kernel_tanq (__float128 x, __float128 y
 	{			/* generate inexact */
 	  if ((ix | u.words32.w1 | u.words32.w2 | u.words32.w3
 	       | (iy + 1)) == 0)
-	    return one / fabsq (x);
+	    return one / __builtin_fabsq (x);
 	  else
 	    return (iy == 1) ? x : -one / x;
 	}
Index: math/x2y2m1q.c
===================================================================
--- math/x2y2m1q.c	(revision 237340)
+++ math/x2y2m1q.c	(working copy)
@@ -49,8 +49,8 @@  mul_split (__float128 *hi, __float128 *lo, __float
 static int
 compare (const void *p, const void *q)
 {
-  __float128 pld = fabsq (*(const __float128 *) p);
-  __float128 qld = fabsq (*(const __float128 *) q);
+  __float128 pld = __builtin_fabsq (*(const __float128 *) p);
+  __float128 qld = __builtin_fabsq (*(const __float128 *) q);
   if (pld < qld)
     return -1;
   else if (pld == qld)