
Clean up int_const_binop and simplify_const_binary_operation functions

Message ID 472066754.20100616232712@post.ru
State New

Commit Message

Anatoly Sokolov June 16, 2010, 7:27 p.m. UTC
Hi.

  This patch cleans up the int_const_binop and simplify_const_binary_operation
functions by using the double_int_* utility functions.

  It also adds double_int_xor, double_int_lrotate, double_int_rrotate
and double_int_min/max functions to GCC and removes the now unnecessary
lrotate_double and rrotate_double functions.
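
  A minimal usage sketch of the new helpers (illustrative only, not part of
the patch; the example function, precision and values are made up, the
signatures are the ones declared for double-int.h by this patch):

/* Illustrative sketch only, not part of the patch: exercise the new
   double_int helpers.  PREC is a made-up example precision.  */
static double_int
example_new_helpers (double_int a, double_int b)
{
  const unsigned int prec = 32;            /* example precision */
  double_int x, r;

  x = double_int_xor (a, b);               /* new inline A ^ B */
  r = double_int_lrotate (x, 5, prec);     /* rotate left by 5 within PREC bits */
  r = double_int_rrotate (r, 5, prec);     /* and rotate back again */

  r = double_int_max (r, double_int_smin (a, b), false);  /* signed max (uns == false) */
  return double_int_umin (r, double_int_umax (a, b));     /* unsigned min/max */
}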

  Bootstrapped/regtested on x86_64-unknown-linux-gnu and
hppa2.0-unknown-linux-gnu for c, c++ and ada.

  OK for mainline?

        * double-int.h (double_int_to_shwi, double_int_to_uhwi,
        double_int_fits_in_uhwi_p): Implement as static inline.
        (double_int_xor): New inline function.
        (double_int_lrotate, double_int_rrotate, double_int_max,
        double_int_umax, double_int_smax, double_int_min, double_int_umin,
        double_int_smin): Declare.
        (lrotate_double, rrotate_double): Remove declaration.
        * double-int.c (double_int_fits_in_uhwi_p, double_int_to_shwi,
        double_int_to_uhwi, lrotate_double, rrotate_double): Remove function.
        (double_int_lrotate, double_int_rrotate, double_int_max,
        double_int_umax, double_int_smax, double_int_min, double_int_umin,
        double_int_smin): New function.
        * fold-const.c (int_const_binop): Clean up, use double_int_*
        functions.
        * simplify-rtx.c (simplify_const_binary_operation): Clean up, use
        double_int_* and immed_double_int_const functions.




Anatoly.

Comments

Richard Biener June 17, 2010, 8:54 a.m. UTC | #1
2010/6/16 Anatoly Sokolov <aesok@post.ru>:
>  Hi.
>
>  This patch cleans up the int_const_binop and simplify_const_binary_operation
> functions by using the double_int_* utility functions.
>
>  It also adds double_int_xor, double_int_lrotate, double_int_rrotate
> and double_int_min/max functions to GCC and removes the now unnecessary
> lrotate_double and rrotate_double functions.
>
>  Bootstrapped/regtested on x86_64-unknown-linux-gnu and
> hppa2.0-unknown-linux-gnu for c, c++ and ada.
>
>  OK for mainline?

Ok with ...

>        * double-int.h (double_int_to_shwi, double_int_to_uhwi,
>        double_int_fits_in_uhwi_p): Implement as static inline.
>        (double_int_xor): New inline function.
>        (double_int_lrotate, double_int_rrotate, double_int_max,
>        double_int_umax, double_int_smax, double_int_min, double_int_umin,
>        double_int_smin): Declare.
>        (lrotate_double, rrotate_double): Remove declaration.
>        * double-int.c (double_int_fits_in_uhwi_p, double_int_to_shwi,
>        double_int_to_uhwi, lrotate_double, rrotate_double): Remove function.
>        (double_int_lrotate, double_int_rrotate, double_int_max,
>        double_int_umax, double_int_smax, double_int_min, double_int_umin,
>        double_int_smin): New function.
>        * fold-const.c (int_const_binop): Clean up, use double_int_*
>        functions.
>        * simplify-rtx.c (simplify_const_binary_operation): Clean up, use
>        double_int_* and immed_double_int_const functions.
>
>
> Index: gcc/double-int.c
> ===================================================================
> --- gcc/double-int.c    (revision 160850)
> +++ gcc/double-int.c    (working copy)
> @@ -432,51 +432,6 @@
>     }
>  }
>
> -/* Rotate the doubleword integer in L1, H1 left by COUNT places
> -   keeping only PREC bits of result.
> -   Rotate right if COUNT is negative.
> -   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
> -
> -void
> -lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
> -               HOST_WIDE_INT count, unsigned int prec,
> -               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
> -{
> -  unsigned HOST_WIDE_INT s1l, s2l;
> -  HOST_WIDE_INT s1h, s2h;
> -
> -  count %= prec;
> -  if (count < 0)
> -    count += prec;
> -
> -  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
> -  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
> -  *lv = s1l | s2l;
> -  *hv = s1h | s2h;
> -}
> -
> -/* Rotate the doubleword integer in L1, H1 left by COUNT places
> -   keeping only PREC bits of result.  COUNT must be positive.
> -   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
> -
> -void
> -rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
> -               HOST_WIDE_INT count, unsigned int prec,
> -               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
> -{
> -  unsigned HOST_WIDE_INT s1l, s2l;
> -  HOST_WIDE_INT s1h, s2h;
> -
> -  count %= prec;
> -  if (count < 0)
> -    count += prec;
> -
> -  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
> -  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
> -  *lv = s1l | s2l;
> -  *hv = s1h | s2h;
> -}
> -
>  /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
>    for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
>    CODE is a tree code for a kind of division, one of
> @@ -842,14 +797,6 @@
>   return r;
>  }
>
> -/* Returns true if CST fits in unsigned HOST_WIDE_INT.  */
> -
> -bool
> -double_int_fits_in_uhwi_p (double_int cst)
> -{
> -  return cst.high == 0;
> -}
> -
>  /* Returns true if CST fits in signed HOST_WIDE_INT.  */
>
>  bool
> @@ -875,24 +822,6 @@
>     return double_int_fits_in_shwi_p (cst);
>  }
>
> -/* Returns value of CST as a signed number.  CST must satisfy
> -   double_int_fits_in_shwi_p.  */
> -
> -HOST_WIDE_INT
> -double_int_to_shwi (double_int cst)
> -{
> -  return (HOST_WIDE_INT) cst.low;
> -}
> -
> -/* Returns value of CST as an unsigned number.  CST must satisfy
> -   double_int_fits_in_uhwi_p.  */
> -
> -unsigned HOST_WIDE_INT
> -double_int_to_uhwi (double_int cst)
> -{
> -  return cst.low;
> -}
> -
>  /* Returns A * B.  */
>
>  double_int
> @@ -1049,6 +978,42 @@
>   return ret;
>  }
>
> +/* Rotate  A left by COUNT places keeping only PREC bits of result.
> +   Rotate right if COUNT is negative.  */
> +
> +double_int
> +double_int_lrotate (double_int a, HOST_WIDE_INT count, unsigned int prec)
> +{
> +  double_int t1, t2;
> +
> +  count %= prec;
> +  if (count < 0)
> +    count += prec;
> +
> +  t1 = double_int_lshift (a, count, prec, false);
> +  t2 = double_int_rshift (a, prec - count, prec, false);
> +
> +  return double_int_ior (t1, t2);
> +}
> +
> +/* Rotate A right by COUNT places keeping only PREC bits of result.
> +   Rotate left if COUNT is negative.  */
> +
> +double_int
> +double_int_rrotate (double_int a, HOST_WIDE_INT count, unsigned int prec)
> +{
> +  double_int t1, t2;
> +
> +  count %= prec;
> +  if (count < 0)
> +    count += prec;
> +
> +  t1 = double_int_rshift (a, count, prec, false);
> +  t2 = double_int_lshift (a, prec - count, prec, false);
> +
> +  return double_int_ior (t1, t2);
> +}
> +
>  /* Returns -1 if A < B, 0 if A == B and 1 if A > B.  Signedness of the
>    comparison is given by UNS.  */
>
> @@ -1097,6 +1062,51 @@
>   return 0;
>  }
>
> +/* Compares two values A and B.  Returns max value.  Signedness of the
> +   comparison is given by UNS.  */
> +
> +double_int
> +double_int_max (double_int a, double_int b, bool uns)
> +{
> +  return (double_int_cmp (a, b, uns) == 1) ? a : b;
> +}
> +
> +/* Compares two signed values A and B.  Returns max value.  */
> +
> +double_int double_int_smax (double_int a, double_int b)
> +{
> +  return (double_int_scmp (a, b) == 1) ? a : b;
> +}
> +
> +/* Compares two unsigned values A and B.  Returns max value.  */
> +
> +double_int double_int_umax (double_int a, double_int b)
> +{
> +  return (double_int_ucmp (a, b) == 1) ? a : b;
> +}
> +
> +/* Compares two values A and B.  Returns min value.  Signedness of the
> +   comparison is given by UNS.  */
> +
> +double_int double_int_min (double_int a, double_int b, bool uns)
> +{
> +  return (double_int_cmp (a, b, uns) == -1) ? a : b;
> +}
> +
> +/* Compares two signed values A and B.  Returns min value.  */
> +
> +double_int double_int_smin (double_int a, double_int b)
> +{
> +  return (double_int_scmp (a, b) == -1) ? a : b;
> +}
> +
> +/* Compares two unsigned values A and B.  Returns min value.  */
> +
> +double_int double_int_umin (double_int a, double_int b)
> +{
> +  return (double_int_ucmp (a, b) == -1) ? a : b;
> +}
> +
>  /* Splits last digit of *CST (taken as unsigned) in BASE and returns it.  */
>
>  static unsigned
> Index: gcc/double-int.h
> ===================================================================
> --- gcc/double-int.h    (revision 160850)
> +++ gcc/double-int.h    (working copy)
> @@ -97,6 +97,35 @@
>   return r;
>  }
>
> +/* Returns value of CST as a signed number.  CST must satisfy
> +   double_int_fits_in_shwi_p.  */
> +
> +static inline HOST_WIDE_INT
> +double_int_to_shwi (double_int cst)
> +{
> +  return (HOST_WIDE_INT) cst.low;
> +}
> +
> +/* Returns value of CST as an unsigned number.  CST must satisfy
> +   double_int_fits_in_uhwi_p.  */
> +
> +static inline unsigned HOST_WIDE_INT
> +double_int_to_uhwi (double_int cst)
> +{
> +  return cst.low;
> +}
> +
> +bool double_int_fits_in_hwi_p (double_int, bool);
> +bool double_int_fits_in_shwi_p (double_int);
> +
> +/* Returns true if CST fits in unsigned HOST_WIDE_INT.  */
> +
> +static inline bool
> +double_int_fits_in_uhwi_p (double_int cst)
> +{
> +  return cst.high == 0;
> +}
> +
>  /* The following operations perform arithmetics modulo 2^precision,
>    so you do not need to call double_int_ext between them, even if
>    you are representing numbers with precision less than
> @@ -109,11 +138,6 @@
>  /* You must ensure that double_int_ext is called on the operands
>    of the following operations, if the precision of the numbers
>    is less than 2 * HOST_BITS_PER_WIDE_INT bits.  */
> -bool double_int_fits_in_hwi_p (double_int, bool);
> -bool double_int_fits_in_shwi_p (double_int);
> -bool double_int_fits_in_uhwi_p (double_int);
> -HOST_WIDE_INT double_int_to_shwi (double_int);
> -unsigned HOST_WIDE_INT double_int_to_uhwi (double_int);
>  double_int double_int_div (double_int, double_int, bool, unsigned);
>  double_int double_int_sdiv (double_int, double_int, unsigned);
>  double_int double_int_udiv (double_int, double_int, unsigned);
> @@ -157,9 +181,22 @@
>   return a;
>  }
>
> +/* Returns A ^ B.  */
> +
> +static inline double_int
> +double_int_xor (double_int a, double_int b)
> +{
> +  a.low ^= b.low;
> +  a.high ^= b.high;
> +  return a;
> +}
> +
> +
>  /* Shift operations.  */
>  double_int double_int_lshift (double_int, HOST_WIDE_INT, unsigned int, bool);
>  double_int double_int_rshift (double_int, HOST_WIDE_INT, unsigned int, bool);
> +double_int double_int_lrotate (double_int, HOST_WIDE_INT, unsigned int);
> +double_int double_int_rrotate (double_int, HOST_WIDE_INT, unsigned int);
>
>  /* Returns true if CST is negative.  Of course, CST is considered to
>    be signed.  */
> @@ -173,6 +210,15 @@
>  int double_int_cmp (double_int, double_int, bool);
>  int double_int_scmp (double_int, double_int);
>  int double_int_ucmp (double_int, double_int);
> +
> +double_int double_int_max (double_int, double_int, bool);
> +double_int double_int_smax (double_int, double_int);
> +double_int double_int_umax (double_int, double_int);
> +
> +double_int double_int_min (double_int, double_int, bool);
> +double_int double_int_smin (double_int, double_int);
> +double_int double_int_umin (double_int, double_int);
> +
>  void dump_double_int (FILE *, double_int, bool);
>
>  /* Zero and sign extension of numbers in smaller precisions.  */
> @@ -248,12 +294,6 @@
>  extern void rshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
>                           HOST_WIDE_INT, unsigned int,
>                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, bool);
> -extern void lrotate_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
> -                           HOST_WIDE_INT, unsigned int,
> -                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
> -extern void rrotate_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
> -                           HOST_WIDE_INT, unsigned int,
> -                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
>  extern int div_and_round_double (unsigned, int, unsigned HOST_WIDE_INT,
>                                 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
>                                 HOST_WIDE_INT, unsigned HOST_WIDE_INT *,
> Index: gcc/fold-const.c
> ===================================================================
> --- gcc/fold-const.c    (revision 160850)
> +++ gcc/fold-const.c    (working copy)
> @@ -924,145 +924,140 @@
>  tree
>  int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
>  {
> -  unsigned HOST_WIDE_INT int1l, int2l;
> -  HOST_WIDE_INT int1h, int2h;
> -  unsigned HOST_WIDE_INT low;
> -  HOST_WIDE_INT hi;
> -  unsigned HOST_WIDE_INT garbagel;
> -  HOST_WIDE_INT garbageh;
> +  double_int op1, op2, res, tmp;
>   tree t;
>   tree type = TREE_TYPE (arg1);
> -  int uns = TYPE_UNSIGNED (type);
> -  int is_sizetype
> +  bool uns = TYPE_UNSIGNED (type);
> +  bool is_sizetype
>     = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
> -  int overflow = 0;
> +  bool overflow = false;
>
> -  int1l = TREE_INT_CST_LOW (arg1);
> -  int1h = TREE_INT_CST_HIGH (arg1);
> -  int2l = TREE_INT_CST_LOW (arg2);
> -  int2h = TREE_INT_CST_HIGH (arg2);
> +  op1 = tree_to_double_int (arg1);
> +  op2 = tree_to_double_int (arg2);
>
>   switch (code)
>     {
>     case BIT_IOR_EXPR:
> -      low = int1l | int2l, hi = int1h | int2h;
> +      res = double_int_ior (op1, op2);
>       break;
>
>     case BIT_XOR_EXPR:
> -      low = int1l ^ int2l, hi = int1h ^ int2h;
> +      res = double_int_xor (op1, op2);
>       break;
>
>     case BIT_AND_EXPR:
> -      low = int1l & int2l, hi = int1h & int2h;
> +      res = double_int_and (op1, op2);
>       break;
>
>     case RSHIFT_EXPR:
> -      int2l = -int2l;
> +      res = double_int_rshift (op1, double_int_to_shwi (op2),
> +                              TYPE_PRECISION (type), !uns);
> +      break;
> +
>     case LSHIFT_EXPR:
>       /* It's unclear from the C standard whether shifts can overflow.
>         The following code ignores overflow; perhaps a C standard
>         interpretation ruling is needed.  */
> -      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
> -                    &low, &hi, !uns);
> +      res = double_int_lshift (op1, double_int_to_shwi (op2),
> +                              TYPE_PRECISION (type), !uns);
>       break;
>
>     case RROTATE_EXPR:
> -      int2l = - int2l;
> +      res = double_int_rrotate (op1, double_int_to_shwi (op2),
> +                               TYPE_PRECISION (type));
> +      break;
> +
>     case LROTATE_EXPR:
> -      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
> -                     &low, &hi);
> +      res = double_int_lrotate (op1, double_int_to_shwi (op2),
> +                               TYPE_PRECISION (type));
>       break;
>
>     case PLUS_EXPR:
> -      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
> +      overflow = add_double (op1.low, op1.high, op2.low, op2.high,
> +                            &res.low, &res.high);
>       break;
>
>     case MINUS_EXPR:
> -      neg_double (int2l, int2h, &low, &hi);
> -      add_double (int1l, int1h, low, hi, &low, &hi);
> -      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
> +      neg_double (op2.low, op2.high, &res.low, &res.high);
> +      add_double (op1.low, op1.high, res.low, res.high,
> +                 &res.low, &res.high);
> +      overflow = OVERFLOW_SUM_SIGN (res.high, op2.high, op1.high);
>       break;
>
>     case MULT_EXPR:
> -      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
> +      overflow = mul_double (op1.low, op1.high, op2.low, op2.high,
> +                            &res.low, &res.high);
>       break;
>
>     case TRUNC_DIV_EXPR:
>     case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
>     case EXACT_DIV_EXPR:
>       /* This is a shortcut for a common special case.  */
> -      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
> +      if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
>          && !TREE_OVERFLOW (arg1)
>          && !TREE_OVERFLOW (arg2)
> -         && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
> +         && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
>        {
>          if (code == CEIL_DIV_EXPR)
> -           int1l += int2l - 1;
> +           op1.low += op2.low - 1;
>
> -         low = int1l / int2l, hi = 0;
> +         res.low = op1.low / op2.low, res.high = 0;
>          break;
>        }
>
>       /* ... fall through ...  */
>
>     case ROUND_DIV_EXPR:
> -      if (int2h == 0 && int2l == 0)
> +      if (double_int_zero_p (op2))
>        return NULL_TREE;
> -      if (int2h == 0 && int2l == 1)
> +      if (double_int_one_p (op2))
>        {
> -         low = int1l, hi = int1h;
> +         res = op1;
>          break;
>        }
> -      if (int1l == int2l && int1h == int2h
> -         && ! (int1l == 0 && int1h == 0))
> +      if (double_int_equal_p (op1, op2)
> +         && ! double_int_zero_p (op1))
>        {
> -         low = 1, hi = 0;
> +         res = double_int_one;
>          break;
>        }
> -      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
> -                                      &low, &hi, &garbagel, &garbageh);
> +      overflow = div_and_round_double (code, uns,
> +                                      op1.low, op1.high, op2.low, op2.high,
> +                                      &res.low, &res.high,
> +                                      &tmp.low, &tmp.high);
>       break;
>
>     case TRUNC_MOD_EXPR:
>     case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
>       /* This is a shortcut for a common special case.  */
> -      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
> +      if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
>          && !TREE_OVERFLOW (arg1)
>          && !TREE_OVERFLOW (arg2)
> -         && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
> +         && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
>        {
>          if (code == CEIL_MOD_EXPR)
> -           int1l += int2l - 1;
> -         low = int1l % int2l, hi = 0;
> +           op1.low += op2.low - 1;
> +         res.low = op1.low % op2.low, res.high = 0;
>          break;
>        }
>
>       /* ... fall through ...  */
>
>     case ROUND_MOD_EXPR:
> -      if (int2h == 0 && int2l == 0)
> +      if (double_int_zero_p (op2))
>        return NULL_TREE;
>       overflow = div_and_round_double (code, uns,
> -                                      int1l, int1h, int2l, int2h,
> -                                      &garbagel, &garbageh, &low, &hi);
> +                                      op1.low, op1.high, op2.low, op2.high,
> +                                      &tmp.low, &tmp.high,
> +                                      &res.low, &res.high);
>       break;
>
>     case MIN_EXPR:
> -    case MAX_EXPR:
> -      if (uns)
> -       low = (((unsigned HOST_WIDE_INT) int1h
> -               < (unsigned HOST_WIDE_INT) int2h)
> -              || (((unsigned HOST_WIDE_INT) int1h
> -                   == (unsigned HOST_WIDE_INT) int2h)
> -                  && int1l < int2l));
> -      else
> -       low = (int1h < int2h
> -              || (int1h == int2h && int1l < int2l));
> +      res = double_int_min (op1, op2, uns);
> +      break;
>
> -      if (low == (code == MIN_EXPR))
> -       low = int1l, hi = int1h;
> -      else
> -       low = int2l, hi = int2h;
> +    case MAX_EXPR:
> +      res = double_int_max (op1, op2, uns);
>       break;
>
>     default:
> @@ -1071,7 +1066,7 @@
>
>   if (notrunc)
>     {
> -      t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
> +      t = build_int_cst_wide (TREE_TYPE (arg1), res.low, res.high);
>
>       /* Propagate overflow flags ourselves.  */
>       if (((!uns || is_sizetype) && overflow)
> @@ -1082,7 +1077,7 @@
>        }
>     }
>   else
> -    t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
> +    t = force_fit_type_double (TREE_TYPE (arg1), res.low, res.high, 1,
>                               ((!uns || is_sizetype) && overflow)
>                               | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
>
> Index: gcc/simplify-rtx.c
> ===================================================================
> --- gcc/simplify-rtx.c  (revision 160850)
> +++ gcc/simplify-rtx.c  (working copy)
> @@ -3268,141 +3268,121 @@
>
>   /* We can fold some multi-word operations.  */
>   if (GET_MODE_CLASS (mode) == MODE_INT
> -      && width == HOST_BITS_PER_WIDE_INT * 2
> -      && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
> -      && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
> +      && width == HOST_BITS_PER_DOUBLE_INT
> +      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
> +      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
>     {
> -      unsigned HOST_WIDE_INT l1, l2, lv, lt;
> -      HOST_WIDE_INT h1, h2, hv, ht;
> +      double_int o0, o1, res, tmp;
>
> -      if (GET_CODE (op0) == CONST_DOUBLE)
> -       l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
> -      else
> -       l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
> +      o0 = rtx_to_double_int (op0);
> +      o1 = rtx_to_double_int (op1);
>
> -      if (GET_CODE (op1) == CONST_DOUBLE)
> -       l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
> -      else
> -       l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
> -
>       switch (code)
>        {
>        case MINUS:
>          /* A - B == A + (-B).  */
> -         neg_double (l2, h2, &lv, &hv);
> -         l2 = lv, h2 = hv;
> +         o1 = double_int_neg (o1);
>
>          /* Fall through....  */
>
>        case PLUS:
> -         add_double (l1, h1, l2, h2, &lv, &hv);
> +         res = double_int_add (o0, o1);
>          break;
>
>        case MULT:
> -         mul_double (l1, h1, l2, h2, &lv, &hv);
> +         res = double_int_mul (o0, o1);
>          break;
>
>        case DIV:
> -         if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
> -                                   &lv, &hv, &lt, &ht))
> +         if (div_and_round_double (TRUNC_DIV_EXPR, 0,
> +                                   o0.low, o0.high, o1.low, o1.high,
> +                                   &res.low, &res.high,
> +                                   &tmp.low, &tmp.high))
>            return 0;
>          break;
>
>        case MOD:
> -         if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
> -                                   &lt, &ht, &lv, &hv))
> +         if (div_and_round_double (TRUNC_DIV_EXPR, 0,
> +                                   o0.low, o0.high, o1.low, o1.high,
> +                                   &tmp.low, &tmp.high,
> +                                   &res.low, &res.high))
>            return 0;
>          break;
>
>        case UDIV:
> -         if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
> -                                   &lv, &hv, &lt, &ht))
> +         if (div_and_round_double (TRUNC_DIV_EXPR, 1,
> +                                   o0.low, o0.high, o1.low, o1.high,
> +                                   &res.low, &res.high,
> +                                   &tmp.low, &tmp.high))
>            return 0;
>          break;
>
>        case UMOD:
> -         if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
> -                                   &lt, &ht, &lv, &hv))
> +         if (div_and_round_double (TRUNC_DIV_EXPR, 1,
> +                                   o0.low, o0.high, o1.low, o1.high,
> +                                   &tmp.low, &tmp.high,
> +                                   &res.low, &res.high))
>            return 0;
>          break;
>
>        case AND:
> -         lv = l1 & l2, hv = h1 & h2;
> +         res = double_int_and (o0, o1);
>          break;
>
>        case IOR:
> -         lv = l1 | l2, hv = h1 | h2;
> +         res = double_int_ior (o0, o1);
>          break;
>
>        case XOR:
> -         lv = l1 ^ l2, hv = h1 ^ h2;
> +         res = double_int_xor (o0, o1);
>          break;
>
>        case SMIN:
> -         if (h1 < h2
> -             || (h1 == h2
> -                 && ((unsigned HOST_WIDE_INT) l1
> -                     < (unsigned HOST_WIDE_INT) l2)))
> -           lv = l1, hv = h1;
> -         else
> -           lv = l2, hv = h2;
> +         res = double_int_smin (o0, o1);
>          break;
>
>        case SMAX:
> -         if (h1 > h2
> -             || (h1 == h2
> -                 && ((unsigned HOST_WIDE_INT) l1
> -                     > (unsigned HOST_WIDE_INT) l2)))
> -           lv = l1, hv = h1;
> -         else
> -           lv = l2, hv = h2;
> +         res = double_int_smax (o0, o1);
>          break;
>
>        case UMIN:
> -         if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
> -             || (h1 == h2
> -                 && ((unsigned HOST_WIDE_INT) l1
> -                     < (unsigned HOST_WIDE_INT) l2)))
> -           lv = l1, hv = h1;
> -         else
> -           lv = l2, hv = h2;
> +         res = double_int_umin (o0, o1);
>          break;
>
>        case UMAX:
> -         if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
> -             || (h1 == h2
> -                 && ((unsigned HOST_WIDE_INT) l1
> -                     > (unsigned HOST_WIDE_INT) l2)))
> -           lv = l1, hv = h1;
> -         else
> -           lv = l2, hv = h2;
> +         res = double_int_umax (o0, o1);
>          break;
>
>        case LSHIFTRT:   case ASHIFTRT:
>        case ASHIFT:
>        case ROTATE:     case ROTATERT:
>          if (SHIFT_COUNT_TRUNCATED)
> -           l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
> +           o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
>
> -         if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
> +         if (!double_int_fits_in_uhwi_p (o1)
> +             || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
>            return 0;

Can you compute the shift amount (double_int_to_uhwi (o1)) into
a temporary and re-use that in the following if-cascade to make
the code more readable?
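
Something along these lines, presumably (an illustrative sketch of the
suggested cleanup, not the code that was actually committed):

	  if (!double_int_fits_in_uhwi_p (o1)
	      || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
	    return 0;

	  {
	    /* Sketch: compute the shift amount once and reuse it below.  */
	    unsigned HOST_WIDE_INT cnt = double_int_to_uhwi (o1);

	    if (code == LSHIFTRT || code == ASHIFTRT)
	      res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
				       code == ASHIFTRT);
	    else if (code == ASHIFT)
	      res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode), true);
	    else if (code == ROTATE)
	      res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
	    else /* code == ROTATERT */
	      res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
	  }
	  break;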

Ok with that change.

If you don't have enough stuff on your list, something that I have
noticed again is the notrunc parameter to int_const_binop.  It
ought to go and we should always truncate - I believe nearly
all callers call int_const_binop with zero; the others might pass
one either 1) for speed reasons or 2) because they really do not
want to truncate.  I suspect all callers are of kind 1), but that
needs auditing.  Callers of kind 2) should be converted to not
build a tree result at all but instead use double_ints - if such
callers exist.
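
For illustration only (hypothetical snippets, not from this patch): a caller
of kind 1) simply passes zero for notrunc, while a caller of kind 2) would
work on double_ints directly instead of asking for an untruncated tree:

  /* Kind 1): the common case - let int_const_binop truncate the result
     to the type.  ARG1/ARG2 are hypothetical INTEGER_CST operands.  */
  tree sum = int_const_binop (PLUS_EXPR, arg1, arg2, 0);

  /* Kind 2): do the arithmetic on double_ints and only build a tree
     (if needed at all) once the final value is known.  */
  double_int wide = double_int_add (tree_to_double_int (arg1),
                                    tree_to_double_int (arg2));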

Thanks,
Richard.

>          if (code == LSHIFTRT || code == ASHIFTRT)
> -           rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
> -                          code == ASHIFTRT);
> +           res = double_int_rshift (o0, double_int_to_uhwi (o1),
> +                                    GET_MODE_BITSIZE (mode),
> +                                    code == ASHIFTRT);
>          else if (code == ASHIFT)
> -           lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
> +           res = double_int_lshift (o0, double_int_to_uhwi (o1),
> +                                    GET_MODE_BITSIZE (mode), true);
>          else if (code == ROTATE)
> -           lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
> +           res = double_int_lrotate (o0, double_int_to_uhwi (o1),
> +                                     GET_MODE_BITSIZE (mode));
>          else /* code == ROTATERT */
> -           rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
> +           res = double_int_rrotate (o0, double_int_to_uhwi (o1),
> +                                     GET_MODE_BITSIZE (mode));
>          break;
>
>        default:
>          return 0;
>        }
>
> -      return immed_double_const (lv, hv, mode);
> +      return immed_double_int_const (res, mode);
>     }
>
>   if (CONST_INT_P (op0) && CONST_INT_P (op1)
>
>
> Anatoly.
>
>
>

Patch

Index: gcc/double-int.c
===================================================================
--- gcc/double-int.c    (revision 160850)
+++ gcc/double-int.c    (working copy)
@@ -432,51 +432,6 @@ 
     }
 }
 
-/* Rotate the doubleword integer in L1, H1 left by COUNT places
-   keeping only PREC bits of result.
-   Rotate right if COUNT is negative.
-   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
-
-void
-lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
-               HOST_WIDE_INT count, unsigned int prec,
-               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
-{
-  unsigned HOST_WIDE_INT s1l, s2l;
-  HOST_WIDE_INT s1h, s2h;
-
-  count %= prec;
-  if (count < 0)
-    count += prec;
-
-  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
-  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
-  *lv = s1l | s2l;
-  *hv = s1h | s2h;
-}
-
-/* Rotate the doubleword integer in L1, H1 left by COUNT places
-   keeping only PREC bits of result.  COUNT must be positive.
-   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
-
-void
-rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
-               HOST_WIDE_INT count, unsigned int prec,
-               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
-{
-  unsigned HOST_WIDE_INT s1l, s2l;
-  HOST_WIDE_INT s1h, s2h;
-
-  count %= prec;
-  if (count < 0)
-    count += prec;
-
-  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
-  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
-  *lv = s1l | s2l;
-  *hv = s1h | s2h;
-}
-
 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
    for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
    CODE is a tree code for a kind of division, one of
@@ -842,14 +797,6 @@ 
   return r;
 }
 
-/* Returns true if CST fits in unsigned HOST_WIDE_INT.  */
-
-bool
-double_int_fits_in_uhwi_p (double_int cst)
-{
-  return cst.high == 0;
-}
-
 /* Returns true if CST fits in signed HOST_WIDE_INT.  */
 
 bool
@@ -875,24 +822,6 @@ 
     return double_int_fits_in_shwi_p (cst);
 }
 
-/* Returns value of CST as a signed number.  CST must satisfy
-   double_int_fits_in_shwi_p.  */
-
-HOST_WIDE_INT
-double_int_to_shwi (double_int cst)
-{
-  return (HOST_WIDE_INT) cst.low;
-}
-
-/* Returns value of CST as an unsigned number.  CST must satisfy
-   double_int_fits_in_uhwi_p.  */
-
-unsigned HOST_WIDE_INT
-double_int_to_uhwi (double_int cst)
-{
-  return cst.low;
-}
-
 /* Returns A * B.  */
 
 double_int
@@ -1049,6 +978,42 @@ 
   return ret;
 }
 
+/* Rotate  A left by COUNT places keeping only PREC bits of result.
+   Rotate right if COUNT is negative.  */
+
+double_int
+double_int_lrotate (double_int a, HOST_WIDE_INT count, unsigned int prec)
+{
+  double_int t1, t2;
+
+  count %= prec;
+  if (count < 0)
+    count += prec;
+
+  t1 = double_int_lshift (a, count, prec, false);
+  t2 = double_int_rshift (a, prec - count, prec, false);
+
+  return double_int_ior (t1, t2);
+}
+
+/* Rotate A right by COUNT places keeping only PREC bits of result.
+   Rotate left if COUNT is negative.  */
+
+double_int
+double_int_rrotate (double_int a, HOST_WIDE_INT count, unsigned int prec)
+{
+  double_int t1, t2;
+
+  count %= prec;
+  if (count < 0)
+    count += prec;
+
+  t1 = double_int_rshift (a, count, prec, false);
+  t2 = double_int_lshift (a, prec - count, prec, false);
+
+  return double_int_ior (t1, t2);
+}
+
 /* Returns -1 if A < B, 0 if A == B and 1 if A > B.  Signedness of the
    comparison is given by UNS.  */
 
@@ -1097,6 +1062,51 @@ 
   return 0;
 }
 
+/* Compares two values A and B.  Returns max value.  Signedness of the
+   comparison is given by UNS.  */
+
+double_int
+double_int_max (double_int a, double_int b, bool uns)
+{
+  return (double_int_cmp (a, b, uns) == 1) ? a : b;
+}
+
+/* Compares two signed values A and B.  Returns max value.  */
+
+double_int double_int_smax (double_int a, double_int b)
+{
+  return (double_int_scmp (a, b) == 1) ? a : b;
+}
+
+/* Compares two unsigned values A and B.  Returns max value.  */
+
+double_int double_int_umax (double_int a, double_int b)
+{
+  return (double_int_ucmp (a, b) == 1) ? a : b;
+}
+
+/* Compares two values A and B.  Returns min value.  Signedness of the
+   comparison is given by UNS.  */
+
+double_int double_int_min (double_int a, double_int b, bool uns)
+{
+  return (double_int_cmp (a, b, uns) == -1) ? a : b;
+}
+
+/* Compares two signed values A and B.  Returns min value.  */
+
+double_int double_int_smin (double_int a, double_int b)
+{
+  return (double_int_scmp (a, b) == -1) ? a : b;
+}
+
+/* Compares two unsigned values A and B.  Returns min value.  */
+
+double_int double_int_umin (double_int a, double_int b)
+{
+  return (double_int_ucmp (a, b) == -1) ? a : b;
+}
+
 /* Splits last digit of *CST (taken as unsigned) in BASE and returns it.  */
 
 static unsigned
Index: gcc/double-int.h
===================================================================
--- gcc/double-int.h    (revision 160850)
+++ gcc/double-int.h    (working copy)
@@ -97,6 +97,35 @@ 
   return r;
 }
 
+/* Returns value of CST as a signed number.  CST must satisfy
+   double_int_fits_in_shwi_p.  */
+
+static inline HOST_WIDE_INT
+double_int_to_shwi (double_int cst)
+{
+  return (HOST_WIDE_INT) cst.low;
+}
+
+/* Returns value of CST as an unsigned number.  CST must satisfy
+   double_int_fits_in_uhwi_p.  */
+
+static inline unsigned HOST_WIDE_INT
+double_int_to_uhwi (double_int cst)
+{
+  return cst.low;
+}
+
+bool double_int_fits_in_hwi_p (double_int, bool);
+bool double_int_fits_in_shwi_p (double_int);
+
+/* Returns true if CST fits in unsigned HOST_WIDE_INT.  */
+
+static inline bool
+double_int_fits_in_uhwi_p (double_int cst)
+{
+  return cst.high == 0;
+}
+
 /* The following operations perform arithmetics modulo 2^precision,
    so you do not need to call double_int_ext between them, even if
    you are representing numbers with precision less than
@@ -109,11 +138,6 @@ 
 /* You must ensure that double_int_ext is called on the operands
    of the following operations, if the precision of the numbers
    is less than 2 * HOST_BITS_PER_WIDE_INT bits.  */
-bool double_int_fits_in_hwi_p (double_int, bool);
-bool double_int_fits_in_shwi_p (double_int);
-bool double_int_fits_in_uhwi_p (double_int);
-HOST_WIDE_INT double_int_to_shwi (double_int);
-unsigned HOST_WIDE_INT double_int_to_uhwi (double_int);
 double_int double_int_div (double_int, double_int, bool, unsigned);
 double_int double_int_sdiv (double_int, double_int, unsigned);
 double_int double_int_udiv (double_int, double_int, unsigned);
@@ -157,9 +181,22 @@ 
   return a;
 }
 
+/* Returns A ^ B.  */
+
+static inline double_int
+double_int_xor (double_int a, double_int b)
+{
+  a.low ^= b.low;
+  a.high ^= b.high;
+  return a;
+}
+
+
 /* Shift operations.  */
 double_int double_int_lshift (double_int, HOST_WIDE_INT, unsigned int, bool);
 double_int double_int_rshift (double_int, HOST_WIDE_INT, unsigned int, bool);
+double_int double_int_lrotate (double_int, HOST_WIDE_INT, unsigned int);
+double_int double_int_rrotate (double_int, HOST_WIDE_INT, unsigned int);
 
 /* Returns true if CST is negative.  Of course, CST is considered to
    be signed.  */
@@ -173,6 +210,15 @@ 
 int double_int_cmp (double_int, double_int, bool);
 int double_int_scmp (double_int, double_int);
 int double_int_ucmp (double_int, double_int);
+
+double_int double_int_max (double_int, double_int, bool);
+double_int double_int_smax (double_int, double_int);
+double_int double_int_umax (double_int, double_int);
+
+double_int double_int_min (double_int, double_int, bool);
+double_int double_int_smin (double_int, double_int);
+double_int double_int_umin (double_int, double_int);
+
 void dump_double_int (FILE *, double_int, bool);
 
 /* Zero and sign extension of numbers in smaller precisions.  */
@@ -248,12 +294,6 @@ 
 extern void rshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                           HOST_WIDE_INT, unsigned int,
                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, bool);
-extern void lrotate_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
-                           HOST_WIDE_INT, unsigned int,
-                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
-extern void rrotate_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
-                           HOST_WIDE_INT, unsigned int,
-                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
 extern int div_and_round_double (unsigned, int, unsigned HOST_WIDE_INT,
                                 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
                                 HOST_WIDE_INT, unsigned HOST_WIDE_INT *,
Index: gcc/fold-const.c
===================================================================
--- gcc/fold-const.c    (revision 160850)
+++ gcc/fold-const.c    (working copy)
@@ -924,145 +924,140 @@ 
 tree
 int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
 {
-  unsigned HOST_WIDE_INT int1l, int2l;
-  HOST_WIDE_INT int1h, int2h;
-  unsigned HOST_WIDE_INT low;
-  HOST_WIDE_INT hi;
-  unsigned HOST_WIDE_INT garbagel;
-  HOST_WIDE_INT garbageh;
+  double_int op1, op2, res, tmp;
   tree t;
   tree type = TREE_TYPE (arg1);
-  int uns = TYPE_UNSIGNED (type);
-  int is_sizetype
+  bool uns = TYPE_UNSIGNED (type);
+  bool is_sizetype
     = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
-  int overflow = 0;
+  bool overflow = false;
 
-  int1l = TREE_INT_CST_LOW (arg1);
-  int1h = TREE_INT_CST_HIGH (arg1);
-  int2l = TREE_INT_CST_LOW (arg2);
-  int2h = TREE_INT_CST_HIGH (arg2);
+  op1 = tree_to_double_int (arg1);
+  op2 = tree_to_double_int (arg2);
 
   switch (code)
     {
     case BIT_IOR_EXPR:
-      low = int1l | int2l, hi = int1h | int2h;
+      res = double_int_ior (op1, op2);
       break;
 
     case BIT_XOR_EXPR:
-      low = int1l ^ int2l, hi = int1h ^ int2h;
+      res = double_int_xor (op1, op2);
       break;
 
     case BIT_AND_EXPR:
-      low = int1l & int2l, hi = int1h & int2h;
+      res = double_int_and (op1, op2);
       break;
 
     case RSHIFT_EXPR:
-      int2l = -int2l;
+      res = double_int_rshift (op1, double_int_to_shwi (op2),
+                              TYPE_PRECISION (type), !uns);
+      break;
+
     case LSHIFT_EXPR:
       /* It's unclear from the C standard whether shifts can overflow.
         The following code ignores overflow; perhaps a C standard
         interpretation ruling is needed.  */
-      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
-                    &low, &hi, !uns);
+      res = double_int_lshift (op1, double_int_to_shwi (op2),
+                              TYPE_PRECISION (type), !uns);
       break;
 
     case RROTATE_EXPR:
-      int2l = - int2l;
+      res = double_int_rrotate (op1, double_int_to_shwi (op2),
+                               TYPE_PRECISION (type));
+      break;
+
     case LROTATE_EXPR:
-      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
-                     &low, &hi);
+      res = double_int_lrotate (op1, double_int_to_shwi (op2),
+                               TYPE_PRECISION (type));
       break;
 
     case PLUS_EXPR:
-      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
+      overflow = add_double (op1.low, op1.high, op2.low, op2.high,
+                            &res.low, &res.high);
       break;
 
     case MINUS_EXPR:
-      neg_double (int2l, int2h, &low, &hi);
-      add_double (int1l, int1h, low, hi, &low, &hi);
-      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
+      neg_double (op2.low, op2.high, &res.low, &res.high);
+      add_double (op1.low, op1.high, res.low, res.high,
+                 &res.low, &res.high);
+      overflow = OVERFLOW_SUM_SIGN (res.high, op2.high, op1.high);
       break;
 
     case MULT_EXPR:
-      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
+      overflow = mul_double (op1.low, op1.high, op2.low, op2.high,
+                            &res.low, &res.high);
       break;
 
     case TRUNC_DIV_EXPR:
     case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
     case EXACT_DIV_EXPR:
       /* This is a shortcut for a common special case.  */
-      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
+      if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
          && !TREE_OVERFLOW (arg1)
          && !TREE_OVERFLOW (arg2)
-         && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
+         && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
        {
          if (code == CEIL_DIV_EXPR)
-           int1l += int2l - 1;
+           op1.low += op2.low - 1;
 
-         low = int1l / int2l, hi = 0;
+         res.low = op1.low / op2.low, res.high = 0;
          break;
        }
 
       /* ... fall through ...  */
 
     case ROUND_DIV_EXPR:
-      if (int2h == 0 && int2l == 0)
+      if (double_int_zero_p (op2))
        return NULL_TREE;
-      if (int2h == 0 && int2l == 1)
+      if (double_int_one_p (op2))
        {
-         low = int1l, hi = int1h;
+         res = op1;
          break;
        }
-      if (int1l == int2l && int1h == int2h
-         && ! (int1l == 0 && int1h == 0))
+      if (double_int_equal_p (op1, op2)
+         && ! double_int_zero_p (op1))
        {
-         low = 1, hi = 0;
+         res = double_int_one;
          break;
        }
-      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
-                                      &low, &hi, &garbagel, &garbageh);
+      overflow = div_and_round_double (code, uns,
+                                      op1.low, op1.high, op2.low, op2.high,
+                                      &res.low, &res.high,
+                                      &tmp.low, &tmp.high);
       break;
 
     case TRUNC_MOD_EXPR:
     case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
       /* This is a shortcut for a common special case.  */
-      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
+      if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
          && !TREE_OVERFLOW (arg1)
          && !TREE_OVERFLOW (arg2)
-         && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
+         && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
        {
          if (code == CEIL_MOD_EXPR)
-           int1l += int2l - 1;
-         low = int1l % int2l, hi = 0;
+           op1.low += op2.low - 1;
+         res.low = op1.low % op2.low, res.high = 0;
          break;
        }
 
       /* ... fall through ...  */
 
     case ROUND_MOD_EXPR:
-      if (int2h == 0 && int2l == 0)
+      if (double_int_zero_p (op2))
        return NULL_TREE;
       overflow = div_and_round_double (code, uns,
-                                      int1l, int1h, int2l, int2h,
-                                      &garbagel, &garbageh, &low, &hi);
+                                      op1.low, op1.high, op2.low, op2.high,
+                                      &tmp.low, &tmp.high,
+                                      &res.low, &res.high);
       break;
 
     case MIN_EXPR:
-    case MAX_EXPR:
-      if (uns)
-       low = (((unsigned HOST_WIDE_INT) int1h
-               < (unsigned HOST_WIDE_INT) int2h)
-              || (((unsigned HOST_WIDE_INT) int1h
-                   == (unsigned HOST_WIDE_INT) int2h)
-                  && int1l < int2l));
-      else
-       low = (int1h < int2h
-              || (int1h == int2h && int1l < int2l));
+      res = double_int_min (op1, op2, uns);
+      break;
 
-      if (low == (code == MIN_EXPR))
-       low = int1l, hi = int1h;
-      else
-       low = int2l, hi = int2h;
+    case MAX_EXPR:
+      res = double_int_max (op1, op2, uns);
       break;
 
     default:
@@ -1071,7 +1066,7 @@ 
 
   if (notrunc)
     {
-      t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
+      t = build_int_cst_wide (TREE_TYPE (arg1), res.low, res.high);
 
       /* Propagate overflow flags ourselves.  */
       if (((!uns || is_sizetype) && overflow)
@@ -1082,7 +1077,7 @@ 
        }
     }
   else
-    t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
+    t = force_fit_type_double (TREE_TYPE (arg1), res.low, res.high, 1,
                               ((!uns || is_sizetype) && overflow)
                               | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
 
Index: gcc/simplify-rtx.c
===================================================================
--- gcc/simplify-rtx.c  (revision 160850)
+++ gcc/simplify-rtx.c  (working copy)
@@ -3268,141 +3268,121 @@ 
 
   /* We can fold some multi-word operations.  */
   if (GET_MODE_CLASS (mode) == MODE_INT
-      && width == HOST_BITS_PER_WIDE_INT * 2
-      && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
-      && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
+      && width == HOST_BITS_PER_DOUBLE_INT
+      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
+      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
     {
-      unsigned HOST_WIDE_INT l1, l2, lv, lt;
-      HOST_WIDE_INT h1, h2, hv, ht;
+      double_int o0, o1, res, tmp;
 
-      if (GET_CODE (op0) == CONST_DOUBLE)
-       l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
-      else
-       l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
+      o0 = rtx_to_double_int (op0);
+      o1 = rtx_to_double_int (op1);
 
-      if (GET_CODE (op1) == CONST_DOUBLE)
-       l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
-      else
-       l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
-
       switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
-         neg_double (l2, h2, &lv, &hv);
-         l2 = lv, h2 = hv;
+         o1 = double_int_neg (o1);
 
          /* Fall through....  */
 
        case PLUS:
-         add_double (l1, h1, l2, h2, &lv, &hv);
+         res = double_int_add (o0, o1);
          break;
 
        case MULT:
-         mul_double (l1, h1, l2, h2, &lv, &hv);
+         res = double_int_mul (o0, o1);
          break;
 
        case DIV:
-         if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
-                                   &lv, &hv, &lt, &ht))
+         if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+                                   o0.low, o0.high, o1.low, o1.high,
+                                   &res.low, &res.high,
+                                   &tmp.low, &tmp.high))
            return 0;
          break;
 
        case MOD:
-         if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
-                                   &lt, &ht, &lv, &hv))
+         if (div_and_round_double (TRUNC_DIV_EXPR, 0,
+                                   o0.low, o0.high, o1.low, o1.high,
+                                   &tmp.low, &tmp.high,
+                                   &res.low, &res.high))
            return 0;
          break;
 
        case UDIV:
-         if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
-                                   &lv, &hv, &lt, &ht))
+         if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+                                   o0.low, o0.high, o1.low, o1.high,
+                                   &res.low, &res.high,
+                                   &tmp.low, &tmp.high))
            return 0;
          break;
 
        case UMOD:
-         if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
-                                   &lt, &ht, &lv, &hv))
+         if (div_and_round_double (TRUNC_DIV_EXPR, 1,
+                                   o0.low, o0.high, o1.low, o1.high,
+                                   &tmp.low, &tmp.high,
+                                   &res.low, &res.high))
            return 0;
          break;
 
        case AND:
-         lv = l1 & l2, hv = h1 & h2;
+         res = double_int_and (o0, o1);
          break;
 
        case IOR:
-         lv = l1 | l2, hv = h1 | h2;
+         res = double_int_ior (o0, o1);
          break;
 
        case XOR:
-         lv = l1 ^ l2, hv = h1 ^ h2;
+         res = double_int_xor (o0, o1);
          break;
 
        case SMIN:
-         if (h1 < h2
-             || (h1 == h2
-                 && ((unsigned HOST_WIDE_INT) l1
-                     < (unsigned HOST_WIDE_INT) l2)))
-           lv = l1, hv = h1;
-         else
-           lv = l2, hv = h2;
+         res = double_int_smin (o0, o1);
          break;
 
        case SMAX:
-         if (h1 > h2
-             || (h1 == h2
-                 && ((unsigned HOST_WIDE_INT) l1
-                     > (unsigned HOST_WIDE_INT) l2)))
-           lv = l1, hv = h1;
-         else
-           lv = l2, hv = h2;
+         res = double_int_smax (o0, o1);
          break;
 
        case UMIN:
-         if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
-             || (h1 == h2
-                 && ((unsigned HOST_WIDE_INT) l1
-                     < (unsigned HOST_WIDE_INT) l2)))
-           lv = l1, hv = h1;
-         else
-           lv = l2, hv = h2;
+         res = double_int_umin (o0, o1);
          break;
 
        case UMAX:
-         if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
-             || (h1 == h2
-                 && ((unsigned HOST_WIDE_INT) l1
-                     > (unsigned HOST_WIDE_INT) l2)))
-           lv = l1, hv = h1;
-         else
-           lv = l2, hv = h2;
+         res = double_int_umax (o0, o1);
          break;
 
        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
-           l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
+           o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
 
-         if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
+         if (!double_int_fits_in_uhwi_p (o1)
+             || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
            return 0;
 
          if (code == LSHIFTRT || code == ASHIFTRT)
-           rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
-                          code == ASHIFTRT);
+           res = double_int_rshift (o0, double_int_to_uhwi (o1),
+                                    GET_MODE_BITSIZE (mode),
+                                    code == ASHIFTRT);
          else if (code == ASHIFT)
-           lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
+           res = double_int_lshift (o0, double_int_to_uhwi (o1),
+                                    GET_MODE_BITSIZE (mode), true);
          else if (code == ROTATE)
-           lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+           res = double_int_lrotate (o0, double_int_to_uhwi (o1),
+                                     GET_MODE_BITSIZE (mode));
          else /* code == ROTATERT */
-           rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+           res = double_int_rrotate (o0, double_int_to_uhwi (o1),
+                                     GET_MODE_BITSIZE (mode));
          break;
 
        default:
          return 0;
        }
 
-      return immed_double_const (lv, hv, mode);
+      return immed_double_int_const (res, mode);
     }
 
   if (CONST_INT_P (op0) && CONST_INT_P (op1)