
Optimize sreal normalization

Message ID 20180824143548.GA49543@kam.mff.cuni.cz

Commit Message

Jan Hubicka Aug. 24, 2018, 2:35 p.m. UTC
Hi,
this patch makes the new exp and sig explicit parameters of the normalize
function.  This makes it easier for inline analysis to track when the result
is going to be constant and makes it possible to inline the sreal constructor
for constants.
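
For illustration only (a standalone toy, not GCC code; the class, names and
numbers below are made up, only the shape of the change mirrors sreal):

#include <cstdint>
#include <cstdio>

struct toy_real
{
  int64_t m_sig;
  int m_exp;

  /* sig/exp arrive as parameters, so at a call like toy_real (5) the
     inliner sees constants and can fold normalize away entirely.  In
     the old style (store to the members first, then have normalize ()
     read m_sig/m_exp back), the values are re-loaded from *this, which
     hides the constants from the inline analysis.  */
  toy_real (int64_t sig, int exp = 0) { normalize (sig, exp); }

  void normalize (int64_t new_sig, int new_exp)
  {
    if (new_sig == 0)
      new_exp = INT32_MIN;	/* canonical zero, like -SREAL_MAX_EXP.  */
    m_sig = new_sig;
    m_exp = new_exp;
  }
};

int main ()
{
  toy_real c (5);		/* folds down to two constant stores.  */
  std::printf ("%lld e%d\n", (long long) c.m_sig, c.m_exp);
}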

My main motivation is however to change the memory representation of sreal so
we do not need a 64-bit sig + 32-bit exponent.  The full 64 bits are needed
only temporarily because we always normalize to a 33-bit sig (32-bit value
+ sign).  Getting rid of the extra bit needs a bit more massaging, so I will
send that as a followup.
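
To make the followup direction concrete, a hypothetical sketch of such a
packed layout (the field widths are my guesses, not what the followup will
actually use):

#include <cstdint>

/* Hypothetical packing, assuming the normalized sig really fits in
   32 bits of value plus a sign bit: sig and exp can then share one
   64-bit word instead of today's 64-bit m_sig + 32-bit m_exp.  */
struct packed_sreal
{
  int64_t m_sig : 33;	/* 32-bit value + sign after normalization.  */
  int64_t m_exp : 31;	/* exponent in the remaining bits.  */
};

static_assert (sizeof (packed_sreal) == 8, "one 64-bit word");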

Bootstrapped/regtested x86_64-linux, OK?

Honza

	* sreal.h (normalize, normalize_up, normalize_down): Add new_sig/new_exp
	parameters.
	(sreal constructor): Update.
	* sreal.c (sreal::operator+, sreal::operator-, sreal::operator*,
	sreal::operator/): Update.

Patch

Index: sreal.h
===================================================================
--- sreal.h	(revision 263834)
+++ sreal.h	(working copy)
@@ -45,9 +45,9 @@  public:
   sreal () : m_sig (-1), m_exp (-1) {}
 
   /* Construct a sreal.  */
-  sreal (int64_t sig, int exp = 0) : m_sig (sig), m_exp (exp)
+  sreal (int64_t sig, int exp = 0)
   {
-    normalize ();
+    normalize (sig, exp);
   }
 
   void dump (FILE *) const;
@@ -130,9 +130,9 @@  public:
   }
 
 private:
-  inline void normalize ();
-  inline void normalize_up ();
-  inline void normalize_down ();
+  inline void normalize (int64_t new_sig, signed int new_exp);
+  inline void normalize_up (int64_t new_sig, signed int new_exp);
+  inline void normalize_down (int64_t new_sig, signed int new_exp);
   void shift_right (int amount);
   static sreal signedless_plus (const sreal &a, const sreal &b, bool negative);
   static sreal signedless_minus (const sreal &a, const sreal &b, bool negative);
@@ -199,23 +199,24 @@  inline sreal operator>> (const sreal &a,
    Make this separate method so inliner can handle hot path better.  */
 
 inline void
-sreal::normalize_up ()
+sreal::normalize_up (int64_t new_sig, signed int new_exp)
 {
-  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
+  unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
   int shift = SREAL_PART_BITS - 2 - floor_log2 (sig);
 
   gcc_checking_assert (shift > 0);
   sig <<= shift;
-  m_exp -= shift;
+  new_exp -= shift;
   gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
 
   /* Check underflow.  */
-  if (m_exp < -SREAL_MAX_EXP)
+  if (new_exp < -SREAL_MAX_EXP)
     {
-      m_exp = -SREAL_MAX_EXP;
+      new_exp = -SREAL_MAX_EXP;
       sig = 0;
     }
-  if (SREAL_SIGN (m_sig) == -1)
+  m_exp = new_exp;
+  if (SREAL_SIGN (new_sig) == -1)
     m_sig = -sig;
   else
     m_sig = sig;
@@ -226,16 +227,16 @@  sreal::normalize_up ()
    Make this separate method so inliner can handle hot path better.  */
 
 inline void
-sreal::normalize_down ()
+sreal::normalize_down (int64_t new_sig, signed int new_exp)
 {
   int last_bit;
-  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
+  unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
   int shift = floor_log2 (sig) - SREAL_PART_BITS + 2;
 
   gcc_checking_assert (shift > 0);
   last_bit = (sig >> (shift-1)) & 1;
   sig >>= shift;
-  m_exp += shift;
+  new_exp += shift;
   gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
 
   /* Round the number.  */
@@ -243,16 +244,17 @@  sreal::normalize_down ()
   if (sig > SREAL_MAX_SIG)
     {
       sig >>= 1;
-      m_exp++;
+      new_exp++;
     }
 
   /* Check overflow.  */
-  if (m_exp > SREAL_MAX_EXP)
+  if (new_exp > SREAL_MAX_EXP)
     {
-      m_exp = SREAL_MAX_EXP;
+      new_exp = SREAL_MAX_EXP;
       sig = SREAL_MAX_SIG;
     }
-  if (SREAL_SIGN (m_sig) == -1)
+  m_exp = new_exp;
+  if (SREAL_SIGN (new_sig) == -1)
     m_sig = -sig;
   else
     m_sig = sig;
@@ -261,16 +263,24 @@  sreal::normalize_down ()
 /* Normalize *this; the hot path.  */
 
 inline void
-sreal::normalize ()
+sreal::normalize (int64_t new_sig, signed int new_exp)
 {
-  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
+  unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
 
   if (sig == 0)
-    m_exp = -SREAL_MAX_EXP;
+    {
+      m_sig = 0;
+      m_exp = -SREAL_MAX_EXP;
+    }
   else if (sig > SREAL_MAX_SIG)
-    normalize_down ();
+    normalize_down (new_sig, new_exp);
   else if (sig < SREAL_MIN_SIG)
-    normalize_up ();
+    normalize_up (new_sig, new_exp);
+  else
+    {
+      m_sig = new_sig;
+      m_exp = new_exp;
+    }
 }
 
 #endif
Index: sreal.c
===================================================================
--- sreal.c	(revision 263834)
+++ sreal.c	(working copy)
@@ -138,7 +138,8 @@  sreal
 sreal::operator+ (const sreal &other) const
 {
   int dexp;
-  sreal tmp, r;
+  sreal tmp;
+  int64_t r_sig, r_exp;
 
   const sreal *a_p = this, *b_p = &other, *bb;
 
@@ -146,10 +147,14 @@  sreal::operator+ (const sreal &other) co
     std::swap (a_p, b_p);
 
   dexp = a_p->m_exp - b_p->m_exp;
-  r.m_exp = a_p->m_exp;
+  r_exp = a_p->m_exp;
   if (dexp > SREAL_BITS)
     {
-      r.m_sig = a_p->m_sig;
+      r_sig = a_p->m_sig;
+
+      sreal r;
+      r.m_sig = r_sig;
+      r.m_exp = r_exp;
       return r;
     }
 
@@ -162,8 +167,8 @@  sreal::operator+ (const sreal &other) co
       bb = &tmp;
     }
 
-  r.m_sig = a_p->m_sig + bb->m_sig;
-  r.normalize ();
+  r_sig = a_p->m_sig + bb->m_sig;
+  sreal r (r_sig, r_exp);
   return r;
 }
 
@@ -174,7 +179,8 @@  sreal
 sreal::operator- (const sreal &other) const
 {
   int dexp;
-  sreal tmp, r;
+  sreal tmp;
+  int64_t r_sig, r_exp;
   const sreal *bb;
   const sreal *a_p = this, *b_p = &other;
 
@@ -186,10 +192,14 @@  sreal::operator- (const sreal &other) co
     }
 
   dexp = a_p->m_exp - b_p->m_exp;
-  r.m_exp = a_p->m_exp;
+  r_exp = a_p->m_exp;
   if (dexp > SREAL_BITS)
     {
-      r.m_sig = sign * a_p->m_sig;
+      r_sig = sign * a_p->m_sig;
+
+      sreal r;
+      r.m_sig = r_sig;
+      r.m_exp = r_exp;
       return r;
     }
   if (dexp == 0)
@@ -201,8 +211,8 @@  sreal::operator- (const sreal &other) co
       bb = &tmp;
     }
 
-  r.m_sig = sign * (a_p->m_sig - bb->m_sig);
-  r.normalize ();
+  r_sig = sign * ((int64_t) a_p->m_sig - bb->m_sig);
+  sreal r (r_sig, r_exp);
   return r;
 }
 
@@ -212,17 +222,14 @@  sreal
 sreal::operator* (const sreal &other) const
 {
   sreal r;
-  if (absu_hwi (m_sig) < SREAL_MIN_SIG || absu_hwi (other.m_sig) < SREAL_MIN_SIG)
+  if (absu_hwi (m_sig) < SREAL_MIN_SIG
+      || absu_hwi (other.m_sig) < SREAL_MIN_SIG)
     {
       r.m_sig = 0;
       r.m_exp = -SREAL_MAX_EXP;
     }
   else
-    {
-      r.m_sig = m_sig * other.m_sig;
-      r.m_exp = m_exp + other.m_exp;
-      r.normalize ();
-    }
+    r.normalize (m_sig * (int64_t) other.m_sig, m_exp + other.m_exp);
 
   return r;
 }
@@ -233,11 +240,9 @@  sreal
 sreal::operator/ (const sreal &other) const
 {
   gcc_checking_assert (other.m_sig != 0);
-  sreal r;
-  r.m_sig
-    = SREAL_SIGN (m_sig) * (SREAL_ABS (m_sig) << SREAL_PART_BITS) / other.m_sig;
-  r.m_exp = m_exp - other.m_exp - SREAL_PART_BITS;
-  r.normalize ();
+  sreal r (SREAL_SIGN (m_sig)
+	   * ((int64_t)SREAL_ABS (m_sig) << SREAL_PART_BITS) / other.m_sig,
+	   m_exp - other.m_exp - SREAL_PART_BITS);
   return r;
 }