===================================================================
@@ -1422,6 +1422,7 @@ wi::int_traits <rtx_mode_t>::get_precisi
return GET_MODE_PRECISION (x.second);
}
+#if 0
inline wi::storage_ref
wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *,
unsigned int precision,
@@ -1437,13 +1438,72 @@ wi::int_traits <rtx_mode_t>::decompose (
return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0),
CONST_WIDE_INT_NUNITS (x.first), precision);
+#if TARGET_SUPPORTS_WIDE_INT == 0
case CONST_DOUBLE:
return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision);
+#endif
default:
gcc_unreachable ();
}
}
+#else
+/* For now, assume that the storage is not canonical, i.e. that the
+   bits above the precision may not be all zeros or all ones.  If this
+   is fixed in rtl, then we will not need the calls to force_to_size.  */
+inline wi::storage_ref
+wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision,
+ const rtx_mode_t &x)
+{
+ int len;
+ int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ int blocks_needed
+   = (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
+
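+ /* If the constant's top block contains bits above the precision,
+    canonize them by sign-extending into SCRATCH; otherwise point
+    directly at the constant's own storage to avoid a copy.  */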
+ gcc_checking_assert (precision == get_precision (x));
+ switch (GET_CODE (x.first))
+ {
+ case CONST_INT:
+ len = 1;
+ if (small_prec)
+ scratch[0] = sext_hwi (INTVAL (x.first), precision);
+ else
+ scratch = &INTVAL (x.first);
+ break;
+
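+ /* Only the top block of a CONST_WIDE_INT can carry bits above the
+    precision; the lower blocks are always used verbatim.  */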
+ case CONST_WIDE_INT:
+ len = CONST_WIDE_INT_NUNITS (x.first);
+ if (small_prec && blocks_needed == len)
+ {
+ int i;
+ for (i = 0; i < len - 1; i++)
+ scratch[i] = CONST_WIDE_INT_ELT (x.first, i);
+ scratch[len - 1] = sext_hwi (CONST_WIDE_INT_ELT (x.first, i), small_prec);
+ }
+ else
+ scratch = &CONST_WIDE_INT_ELT (x.first, 0);
+ break;
+
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ case CONST_DOUBLE:
+ len = 2;
+ if (small_prec)
+ {
+ scratch[0] = CONST_DOUBLE_LOW (x.first);
+ scratch[1] = sext_hwi (CONST_DOUBLE_HIGH (x.first), small_prec);
+ }
+ else
+ scratch = &CONST_DOUBLE_LOW (x.first);
+ break;
+#endif
+
+ default:
+ gcc_unreachable ();
+ }
+ return wi::storage_ref (scratch, len, precision);
+}
+#endif
namespace wi
{
===================================================================
@@ -48,6 +48,9 @@ static const HOST_WIDE_INT zeros[WIDE_IN
(PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1))
+/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
+   based on the top existing bit of VAL.  */
+
static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
@@ -304,10 +307,10 @@ wi::force_to_size (HOST_WIDE_INT *val, c
if (precision > xprecision)
{
/* Expanding. */
- unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
-
if (sgn == UNSIGNED)
{
+ unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
+
if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
else if (val[len - 1] < 0)
@@ -320,11 +323,6 @@ wi::force_to_size (HOST_WIDE_INT *val, c
val[len++] = 0;
}
}
- /* We have to do this because we cannot guarantee that there is
- not trash in the top block of an uncompressed value. For a
- compressed value, all the bits are significant. */
- else if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
- val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
}
else if (precision < xprecision)
/* Contracting. */
@@ -352,27 +350,18 @@ selt (const HOST_WIDE_INT *a, unsigned i
return 0;
}
- if (small_prec && index == blocks_needed - 1)
- {
- /* The top block is partially outside of the precision. */
- if (sgn == SIGNED)
- return sext_hwi (a[index], small_prec);
- else
- return zext_hwi (a[index], small_prec);
- }
- return a[index];
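+ /* Blocks are now kept canonically sign-extended, so a signed read
+    of a partial top block needs no extension; only an unsigned read
+    must clear the bits above the precision.  */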
+ if (sgn == UNSIGNED && small_prec && index == blocks_needed - 1)
+ return zext_hwi (a[index], small_prec);
+ else
+ return a[index];
}
-/* Find the hignest bit represented in a wide int. This will in
+/* Find the highest bit represented in a wide int. This will in
general have the same value as the sign bit. */
static inline HOST_WIDE_INT
-top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
+top_bit_of (const HOST_WIDE_INT *a, unsigned int len)
{
- if (len == BLOCKS_NEEDED (prec)
- && (prec & (HOST_BITS_PER_WIDE_INT - 1)))
- return (a[len - 1] >> (prec & (HOST_BITS_PER_WIDE_INT - 1))) & 1;
- else
- return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1;
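+ /* The top block of a decomposed value is kept sign-extended, so its
+    msb is the sign bit regardless of the precision.  */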
+ return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1;
}
/*
@@ -384,24 +373,13 @@ top_bit_of (const HOST_WIDE_INT *a, unsi
/* Return true if OP0 == OP1. */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- unsigned int prec)
+ const HOST_WIDE_INT *op1, unsigned int op1len)
{
int l0 = op0len - 1;
- unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
while (op0len != op1len)
return false;
- if (op0len == BLOCKS_NEEDED (prec) && small_prec)
- {
- /* It does not matter if we zext or sext here, we just have to
- do both the same way. */
- if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
- return false;
- l0--;
- }
-
while (l0 >= 0)
if (op0[l0] != op1[l0])
return false;
@@ -658,7 +636,7 @@ wi::set_bit_large (HOST_WIDE_INT *val, c
/* If the bit we just set is at the msb of the block, make sure
that any higher bits are zeros. */
- if (bit + 1 < precision && bit == HOST_BITS_PER_WIDE_INT - 1)
+ if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
val[len++] = 0;
return len;
}
@@ -821,7 +799,7 @@ wi::and_large (HOST_WIDE_INT *val, const
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask == 0)
{
l0 = l1;
@@ -839,7 +817,7 @@ wi::and_large (HOST_WIDE_INT *val, const
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask == 0)
len = l0 + 1;
else
@@ -879,7 +857,7 @@ wi::and_not_large (HOST_WIDE_INT *val, c
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask != 0)
{
l0 = l1;
@@ -897,7 +875,7 @@ wi::and_not_large (HOST_WIDE_INT *val, c
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask == 0)
len = l0 + 1;
else
@@ -937,7 +915,7 @@ wi::or_large (HOST_WIDE_INT *val, const
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask != 0)
{
l0 = l1;
@@ -955,7 +933,7 @@ wi::or_large (HOST_WIDE_INT *val, const
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask != 0)
len = l0 + 1;
else
@@ -995,7 +973,7 @@ wi::or_not_large (HOST_WIDE_INT *val, co
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask == 0)
{
l0 = l1;
@@ -1013,7 +991,7 @@ wi::or_not_large (HOST_WIDE_INT *val, co
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask != 0)
len = l0 + 1;
else
@@ -1052,7 +1030,7 @@ wi::xor_large (HOST_WIDE_INT *val, const
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
while (l0 > l1)
{
val[l0] = op0[l0] ^ op1mask;
@@ -1062,7 +1040,7 @@ wi::xor_large (HOST_WIDE_INT *val, const
if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
while (l1 > l0)
{
val[l1] = op0mask ^ op1[l1];
@@ -1101,8 +1079,8 @@ wi::add_large (HOST_WIDE_INT *val, const
unsigned int i, small_prec;
unsigned int len = MAX (op0len, op1len);
- mask0 = -top_bit_of (op0, op0len, prec);
- mask1 = -top_bit_of (op1, op1len, prec);
+ mask0 = -top_bit_of (op0, op0len);
+ mask1 = -top_bit_of (op1, op1len);
/* Add all of the explicitly defined elements. */
for (i = 0; i < len; i++)
@@ -1142,6 +1120,7 @@ wi::add_large (HOST_WIDE_INT *val, const
}
}
+ /* Canonize the top of the top block. */
small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
{
@@ -1211,7 +1190,7 @@ wi_unpack (unsigned HOST_HALF_WIDE_INT *
if (sgn == SIGNED)
{
- mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
+ mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len);
mask &= HALF_INT_MASK;
}
else
@@ -1501,8 +1480,8 @@ wi::sub_large (HOST_WIDE_INT *val, const
unsigned int i, small_prec;
unsigned int len = MAX (op0len, op1len);
- mask0 = -top_bit_of (op0, op0len, prec);
- mask1 = -top_bit_of (op1, op1len, prec);
+ mask0 = -top_bit_of (op0, op0len);
+ mask1 = -top_bit_of (op1, op1len);
/* Subtract all of the explicitly defined elements. */
for (i = 0; i < len; i++)
@@ -1541,7 +1520,7 @@ wi::sub_large (HOST_WIDE_INT *val, const
}
}
-
+ /* Canonize the top of the top block. */
small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
{
@@ -1790,14 +1769,14 @@ wi::divmod_internal (HOST_WIDE_INT *quot
did. */
if (sgn == SIGNED)
{
- if (top_bit_of (dividend, dividend_len, dividend_prec))
+ if (top_bit_of (dividend, dividend_len))
{
dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len,
dividend_prec, UNSIGNED, 0);
dividend = u0;
dividend_neg = true;
}
- if (top_bit_of (divisor, divisor_len, divisor_prec))
+ if (top_bit_of (divisor, divisor_len))
{
divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len,
divisor_prec, UNSIGNED, 0);
@@ -1811,12 +1790,12 @@ wi::divmod_internal (HOST_WIDE_INT *quot
wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT*)divisor,
divisor_len, divisor_blocks_needed, divisor_prec, sgn);
- if (top_bit_of (dividend, dividend_len, dividend_prec) && sgn == SIGNED)
+ if (top_bit_of (dividend, dividend_len) && sgn == SIGNED)
m = dividend_blocks_needed;
else
m = 2 * dividend_len;
- if (top_bit_of (divisor, divisor_len, divisor_prec) && sgn == SIGNED)
+ if (top_bit_of (divisor, divisor_len) && sgn == SIGNED)
n = divisor_blocks_needed;
else
n = 2 * divisor_len;
===================================================================
@@ -567,6 +567,8 @@ public:
HOST_WIDE_INT elt (unsigned int) const;
unsigned HOST_WIDE_INT ulow () const;
unsigned HOST_WIDE_INT uhigh () const;
+ HOST_WIDE_INT slow () const;
+ HOST_WIDE_INT shigh () const;
#define BINARY_PREDICATE(OP, F) \
template <typename T> \
@@ -682,7 +684,23 @@ generic_wide_int <storage>::sign_mask ()
return this->get_val ()[this->get_len () - 1] < 0 ? -1 : 0;
}
-/* Return the value of the least-significant explicitly-encoded block. */
+/* Return the signed value of the least-significant explicitly-encoded
+   block.  */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::slow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the signed value of the most-significant explicitly-encoded
+   block.  */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::shigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return the unsigned value of the least-significant explicitly-encoded
+   block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::ulow () const
@@ -690,7 +708,7 @@ generic_wide_int <storage>::ulow () cons
return this->get_val ()[0];
}
-/* Return the value of the most-significant explicitly-encoded block. */
+/* Return the unsigned value of the most-significant explicitly-encoded
+   block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::uhigh () const
@@ -1294,7 +1312,7 @@ decompose (HOST_WIDE_INT *scratch, unsig
namespace wi
{
bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
+ const HOST_WIDE_INT *, unsigned int);
bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int);
bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
@@ -1400,9 +1418,9 @@ wi::fits_uhwi_p (const wide_int_ref &x)
if (x.precision <= HOST_BITS_PER_WIDE_INT)
return true;
if (x.len == 1)
- return x.sign_mask () == 0;
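+   /* A single canonical block is sign-extended, so a signed test of
+      the block gives the sign of the whole value.  */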
+ return x.get_val ()[0] >= 0;
if (x.precision < 2 * HOST_BITS_PER_WIDE_INT)
- return zext_hwi (x.uhigh (), x.precision % HOST_BITS_PER_WIDE_INT) == 0;
+ return x.uhigh () == 0;
return x.len == 2 && x.uhigh () == 0;
}
@@ -1415,9 +1433,7 @@ wi::neg_p (const wide_int_ref &x, signop
return false;
if (x.precision == 0)
return false;
- if (x.len * HOST_BITS_PER_WIDE_INT > x.precision)
- return (x.uhigh () >> (x.precision % HOST_BITS_PER_WIDE_INT - 1)) & 1;
- return x.sign_mask () < 0;
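+ /* The top block is canonically sign-extended beyond the precision,
+    so a plain signed test of it suffices.  */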
+ return x.shigh () < 0;
}
/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
@@ -1438,11 +1454,9 @@ wi::eq_p (const T1 &x, const T2 &y)
wide_int_ref xi (x, precision);
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
- {
- unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow ();
- return (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0;
- }
- return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
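+   /* Single-block values are canonized within their precision, so a
+      direct block compare is enough.  */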
+ return xi.ulow () == yi.ulow ();
+ else
+ return eq_p_large (xi.val, xi.len, yi.val, yi.len);
}
/* Return true if X != Y. X and Y must be binary-compatible. */
@@ -1459,13 +1473,10 @@ wi::lts_p (const wide_int_ref &x, const
{
if (x.precision <= HOST_BITS_PER_WIDE_INT
&& y.precision <= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision);
- HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision);
- return xl < yl;
- }
- return lts_p_large (x.val, x.len, x.precision, y.val, y.len,
- y.precision);
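+   /* Canonical blocks are already sign-extended, so the slow ()
+      values compare directly.  */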
+ return x.slow () < y.slow ();
+ else
+ return lts_p_large (x.val, x.len, x.precision, y.val, y.len,
+ y.precision);
}
/* Return true if X < Y when both are treated as unsigned values. */
@@ -1479,7 +1490,8 @@ wi::ltu_p (const wide_int_ref &x, const
unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision);
return xl < yl;
}
- return ltu_p_large (x.val, x.len, x.precision, y.val, y.len, y.precision);
+ else
+ return ltu_p_large (x.val, x.len, x.precision, y.val, y.len, y.precision);
}
/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
@@ -1572,8 +1584,8 @@ wi::cmps (const wide_int_ref &x, const w
if (x.precision <= HOST_BITS_PER_WIDE_INT
&& y.precision <= HOST_BITS_PER_WIDE_INT)
{
- HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision);
- HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision);
+ HOST_WIDE_INT xl = x.slow ();
+ HOST_WIDE_INT yl = y.slow ();
if (xl < yl)
return -1;
else if (xl > yl)
@@ -1851,7 +1863,7 @@ wi::bit_or (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
wide_int_ref xi (x, precision);
wide_int_ref yi (y, precision);
- if (precision <= HOST_BITS_PER_WIDE_INT)
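+ /* When both inputs are single blocks, the result is a single block
+    even for wider precisions, since canonical blocks are already
+    sign-extended.  */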
+ if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () | yi.ulow ();
result.set_len (1);
@@ -1911,7 +1923,7 @@ wi::add (const T1 &x, const T2 &y)
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () + yi.ulow ();
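+     /* Canonize the single-block result by sign-extending it from
+	the precision.  */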
+ val[0] = sext_hwi (xi.ulow () + yi.ulow (), precision);
result.set_len (1);
}
else
@@ -1942,7 +1954,7 @@ wi::add (const T1 &x, const T2 &y, signo
else
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
< (xl << (HOST_BITS_PER_WIDE_INT - precision)));
- val[0] = resultl;
+ val[0] = sext_hwi (resultl, precision);
result.set_len (1);
}
else
@@ -1962,7 +1974,7 @@ wi::sub (const T1 &x, const T2 &y)
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () - yi.ulow ();
+ val[0] = sext_hwi (xi.ulow () - yi.ulow (), precision);
result.set_len (1);
}
else
@@ -1993,7 +2005,7 @@ wi::sub (const T1 &x, const T2 &y, signo
else
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
> (xl << (HOST_BITS_PER_WIDE_INT - precision)));
- val[0] = resultl;
+ val[0] = sext_hwi (resultl, precision);
result.set_len (1);
}
else
@@ -2013,7 +2025,7 @@ wi::mul (const T1 &x, const T2 &y)
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () * yi.ulow ();
+ val[0] = sext_hwi (xi.ulow () * yi.ulow (), precision);
result.set_len (1);
}
else
@@ -2430,7 +2442,7 @@ wi::lshift (const T &x, const wide_int_r
}
else if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () << shift;
+ val[0] = sext_hwi (xi.ulow () << shift, precision);
result.set_len (1);
}
else
@@ -2485,8 +2497,7 @@ wi::arshift (const T &x, const wide_int_
}
else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift,
- xi.precision - shift);
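+     /* The input block is already sign-extended, so no zero-extension
+	is needed before the logical shift; the final sext_hwi restores
+	the sign of the result.  */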
+ val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
result.set_len (1);
}
else