From patchwork Mon Sep 9 18:14:49 2013 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kenneth Zadeck X-Patchwork-Id: 273628 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from sourceware.org (server1.sourceware.org [209.132.180.131]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (Client CN "www.sourceware.org", Issuer "StartCom Class 1 Primary Intermediate Server CA" (not verified)) by ozlabs.org (Postfix) with ESMTPS id BDDB82C0102 for ; Tue, 10 Sep 2013 04:15:03 +1000 (EST) DomainKey-Signature: a=rsa-sha1; c=nofws; d=gcc.gnu.org; h=list-id :list-unsubscribe:list-archive:list-post:list-help:sender :message-id:date:from:mime-version:to:subject:content-type; q= dns; s=default; b=nfDt9GMTixlOwtjJviNlU10nMf3OcPk4mqQ3e668r+b0gv yATEQhDgCkl+YhXmvbYdEBDBYoGlcVrOEdH9iQGQUcnPasKDqSm8jx7ww5h3MWUD GtKQF9+a+PqzVSio5se4lUY7z7y3IUGN2syo30ONBOQg2LD26gL+IKADn9Zlw= DKIM-Signature: v=1; a=rsa-sha1; c=relaxed; d=gcc.gnu.org; h=list-id :list-unsubscribe:list-archive:list-post:list-help:sender :message-id:date:from:mime-version:to:subject:content-type; s= default; bh=WMJ6bSDiQa/47SA5b02WoeTJlDs=; b=O1qCavG7tPmPZu1H6nGA PMvIsF205BeifcTaBU8ubcpT+fpYMY9cJ5C/csZuTY2aZIm/vyvymTS304c7Owf3 g62zUcbddKUm9jgITk5QFpag/fXmy6B4tGeMvXTwC13fCttyHugA6RrZt87NKW8Q A8DLOUBYqPlE/mSlvFi+ayY= Received: (qmail 3453 invoked by alias); 9 Sep 2013 18:14:56 -0000 Mailing-List: contact gcc-patches-help@gcc.gnu.org; run by ezmlm Precedence: bulk List-Id: List-Unsubscribe: List-Archive: List-Post: List-Help: Sender: gcc-patches-owner@gcc.gnu.org Delivered-To: mailing list gcc-patches@gcc.gnu.org Received: (qmail 3439 invoked by uid 89); 9 Sep 2013 18:14:55 -0000 Received: from mail-vc0-f173.google.com (HELO mail-vc0-f173.google.com) (209.85.220.173) by sourceware.org (qpsmtpd/0.93/v0.84-503-g423c35a) with (AES128-SHA encrypted) ESMTPS; Mon, 09 Sep 2013 18:14:55 
+0000 Authentication-Results: sourceware.org; auth=none X-Virus-Found: No X-Spam-SWARE-Status: No, score=-2.9 required=5.0 tests=ALL_TRUSTED, AWL, BAYES_00 autolearn=ham version=3.3.2 X-HELO: mail-vc0-f173.google.com Received: by mail-vc0-f173.google.com with SMTP id id13so4123821vcb.4 for ; Mon, 09 Sep 2013 11:14:51 -0700 (PDT) X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20130820; h=x-gm-message-state:message-id:date:from:user-agent:mime-version:to :subject:content-type; bh=987rvMjxb/lD3BrnIvkzvtaqdCds9292a/SFmw7jxV8=; b=ii2g3onw+OmT8syDcTvpAlXJ6295gzRG/NptLqnKYfmKW2o9PFvY6qwMVFlJg6+34B osiQEnHOLbfc2Zfd5YSuGeUBszK7hS4OUUZSjniq2B+7KPcmFyYca/eUZrsf22RxW0gR wYqLCkKnpyrWuc9Emmw7OBeuId945JXMRcIMz7bGGAzKHIoRp1xdApS8sbvbdnhIvUUA wKWCKRZ4bIqP71/ime7VcUWOaVKN8ZW4jGgTYxgvuPDskBQ0GEAG52Eg+DkAEfCpFhqT VX+E/Mi6Uj7ygUTBIrv2lV2Fe54BumnJ4/Q8UGZVvNFSo0/9dG1NqyvYB6xcU7hPfGzz jMGw== X-Gm-Message-State: ALoCoQkp8r8p7aVzXxwQnuLAPxJJ2z75XAOGY+512cimR9kUs75DnVGv0WA9jkapYfiuvpH4pS65 X-Received: by 10.220.42.140 with SMTP id s12mr38013vce.33.1378750491401; Mon, 09 Sep 2013 11:14:51 -0700 (PDT) Received: from moria.site (pool-98-113-157-218.nycmny.fios.verizon.net. [98.113.157.218]) by mx.google.com with ESMTPSA id n10sm3453090vew.4.1969.12.31.16.00.00 (version=TLSv1 cipher=ECDHE-RSA-RC4-SHA bits=128/128); Mon, 09 Sep 2013 11:14:50 -0700 (PDT) Message-ID: <522E1019.40302@naturalbridge.com> Date: Mon, 09 Sep 2013 14:14:49 -0400 From: Kenneth Zadeck User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130801 Thunderbird/17.0.8 MIME-Version: 1.0 To: Richard Biener , Richard Sandiford , Mike Stump , gcc-patches Subject: patch to make wide-int assume that everything is canonical beyond the precision? i still have more testing to go on this but it seems to be ok. my current problem is that the branch seems to have a fair number of failures so i want to get that cleaned up before i do further testing and commit this. 
I left the old rtl constructors ifdefed out. We are likely to want them back soon, given how well Richard's work on cleaning up the rtl seems to have gone. Kenny Index: gcc/rtl.h =================================================================== --- gcc/rtl.h (revision 202389) +++ gcc/rtl.h (working copy) @@ -1422,6 +1422,7 @@ wi::int_traits ::get_precisi return GET_MODE_PRECISION (x.second); } +#if 0 inline wi::storage_ref wi::int_traits ::decompose (HOST_WIDE_INT *, unsigned int precision, @@ -1437,13 +1438,72 @@ wi::int_traits ::decompose ( return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0), CONST_WIDE_INT_NUNITS (x.first), precision); +#if TARGET_SUPPORTS_WIDE_INT != 0 case CONST_DOUBLE: return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision); +#endif default: gcc_unreachable (); } } +#else +/* For now, assume that the storage is not canonical, i.e. that there + are bits above the precision that are not all zeros or all ones. + If this is fixed in rtl, then we will not need the calls to + force_to_size. 
*/ +inline wi::storage_ref +wi::int_traits ::decompose (HOST_WIDE_INT *scratch, + unsigned int precision, + const rtx_mode_t &x) +{ + int len; + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + int blocks_needed = (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT; + + gcc_checking_assert (precision == get_precision (x)); + switch (GET_CODE (x.first)) + { + case CONST_INT: + len = 1; + if (small_prec) + scratch[0] = sext_hwi (INTVAL (x.first), precision); + else + scratch = &INTVAL (x.first); + break; + + case CONST_WIDE_INT: + len = CONST_WIDE_INT_NUNITS (x.first); + if (small_prec && blocks_needed == len - 1) + { + int i; + for (i = 0; i < len - 1; i++) + scratch[i] = CONST_WIDE_INT_ELT (x.first, i); + scratch[len - 1] = sext_hwi (CONST_WIDE_INT_ELT (x.first, i), small_prec); + } + else + scratch = &CONST_WIDE_INT_ELT (x.first, 0); + break; + +#if TARGET_SUPPORTS_WIDE_INT == 0 + case CONST_DOUBLE: + len = 2; + if (small_prec) + { + scratch[0] = CONST_DOUBLE_LOW (x.first); + scratch[1] = sext_hwi (CONST_DOUBLE_HIGH (x.first), small_prec); + } + else + scratch = &CONST_DOUBLE_LOW (x.first); + break; +#endif + + default: + gcc_unreachable (); + } + return wi::storage_ref (scratch, len, precision); +} +#endif namespace wi { Index: gcc/wide-int.cc =================================================================== --- gcc/wide-int.cc (revision 202389) +++ gcc/wide-int.cc (working copy) @@ -48,6 +48,9 @@ static const HOST_WIDE_INT zeros[WIDE_IN (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1) #define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1)) +/* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1 + based on the top existing bit of VAL. */ + static unsigned HOST_WIDE_INT safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i) { @@ -304,10 +307,10 @@ wi::force_to_size (HOST_WIDE_INT *val, c if (precision > xprecision) { /* Expanding. 
*/ - unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT; - if (sgn == UNSIGNED) { + unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT; + if (small_xprecision && len == BLOCKS_NEEDED (xprecision)) val[len - 1] = zext_hwi (val[len - 1], small_xprecision); else if (val[len - 1] < 0) @@ -320,11 +323,6 @@ wi::force_to_size (HOST_WIDE_INT *val, c val[len++] = 0; } } - /* We have to do this because we cannot guarantee that there is - not trash in the top block of an uncompressed value. For a - compressed value, all the bits are significant. */ - else if (small_xprecision && len == BLOCKS_NEEDED (xprecision)) - val[len - 1] = sext_hwi (val[len - 1], small_xprecision); } else if (precision < xprecision) /* Contracting. */ @@ -352,27 +350,18 @@ selt (const HOST_WIDE_INT *a, unsigned i return 0; } - if (small_prec && index == blocks_needed - 1) - { - /* The top block is partially outside of the precision. */ - if (sgn == SIGNED) - return sext_hwi (a[index], small_prec); - else - return zext_hwi (a[index], small_prec); - } - return a[index]; + if (sgn == UNSIGNED && small_prec && index == blocks_needed - 1) + return zext_hwi (a[index], small_prec); + else + return a[index]; } -/* Find the hignest bit represented in a wide int. This will in +/* Find the highest bit represented in a wide int. This will in general have the same value as the sign bit. */ static inline HOST_WIDE_INT -top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec) +top_bit_of (const HOST_WIDE_INT *a, unsigned int len) { - if (len == BLOCKS_NEEDED (prec) - && (prec & (HOST_BITS_PER_WIDE_INT - 1))) - return (a[len - 1] >> (prec & (HOST_BITS_PER_WIDE_INT - 1))) & 1; - else - return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1; + return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1; } /* @@ -384,24 +373,13 @@ top_bit_of (const HOST_WIDE_INT *a, unsi /* Return true if OP0 == OP1. 
*/ bool wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, - const HOST_WIDE_INT *op1, unsigned int op1len, - unsigned int prec) + const HOST_WIDE_INT *op1, unsigned int op1len) { int l0 = op0len - 1; - unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); while (op0len != op1len) return false; - if (op0len == BLOCKS_NEEDED (prec) && small_prec) - { - /* It does not matter if we zext or sext here, we just have to - do both the same way. */ - if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec)) - return false; - l0--; - } - while (l0 >= 0) if (op0[l0] != op1[l0]) return false; @@ -658,7 +636,7 @@ wi::set_bit_large (HOST_WIDE_INT *val, c /* If the bit we just set is at the msb of the block, make sure that any higher bits are zeros. */ - if (bit + 1 < precision && bit == HOST_BITS_PER_WIDE_INT - 1) + if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1) val[len++] = 0; return len; } @@ -821,7 +799,7 @@ wi::and_large (HOST_WIDE_INT *val, const unsigned int len = MAX (op0len, op1len); if (l0 > l1) { - HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); if (op1mask == 0) { l0 = l1; @@ -839,7 +817,7 @@ wi::and_large (HOST_WIDE_INT *val, const } else if (l1 > l0) { - HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); if (op0mask == 0) len = l0 + 1; else @@ -879,7 +857,7 @@ wi::and_not_large (HOST_WIDE_INT *val, c unsigned int len = MAX (op0len, op1len); if (l0 > l1) { - HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); if (op1mask != 0) { l0 = l1; @@ -897,7 +875,7 @@ wi::and_not_large (HOST_WIDE_INT *val, c } else if (l1 > l0) { - HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); if (op0mask == 0) len = l0 + 1; else @@ -937,7 +915,7 @@ wi::or_large (HOST_WIDE_INT *val, const unsigned int 
len = MAX (op0len, op1len); if (l0 > l1) { - HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); if (op1mask != 0) { l0 = l1; @@ -955,7 +933,7 @@ wi::or_large (HOST_WIDE_INT *val, const } else if (l1 > l0) { - HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); if (op0mask != 0) len = l0 + 1; else @@ -995,7 +973,7 @@ wi::or_not_large (HOST_WIDE_INT *val, co unsigned int len = MAX (op0len, op1len); if (l0 > l1) { - HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); if (op1mask == 0) { l0 = l1; @@ -1013,7 +991,7 @@ wi::or_not_large (HOST_WIDE_INT *val, co } else if (l1 > l0) { - HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); if (op0mask != 0) len = l0 + 1; else @@ -1052,7 +1030,7 @@ wi::xor_large (HOST_WIDE_INT *val, const unsigned int len = MAX (op0len, op1len); if (l0 > l1) { - HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); while (l0 > l1) { val[l0] = op0[l0] ^ op1mask; @@ -1062,7 +1040,7 @@ wi::xor_large (HOST_WIDE_INT *val, const if (l1 > l0) { - HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); while (l1 > l0) { val[l1] = op0mask ^ op1[l1]; @@ -1101,8 +1079,8 @@ wi::add_large (HOST_WIDE_INT *val, const unsigned int i, small_prec; unsigned int len = MAX (op0len, op1len); - mask0 = -top_bit_of (op0, op0len, prec); - mask1 = -top_bit_of (op1, op1len, prec); + mask0 = -top_bit_of (op0, op0len); + mask1 = -top_bit_of (op1, op1len); /* Add all of the explicitly defined elements. */ for (i = 0; i < len; i++) @@ -1142,6 +1120,7 @@ wi::add_large (HOST_WIDE_INT *val, const } } + /* Canonize the top of the top block. 
*/ small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); if (small_prec != 0 && BLOCKS_NEEDED (prec) == len) { @@ -1211,7 +1190,7 @@ wi_unpack (unsigned HOST_HALF_WIDE_INT * if (sgn == SIGNED) { - mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec); + mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len); mask &= HALF_INT_MASK; } else @@ -1501,8 +1480,8 @@ wi::sub_large (HOST_WIDE_INT *val, const unsigned int i, small_prec; unsigned int len = MAX (op0len, op1len); - mask0 = -top_bit_of (op0, op0len, prec); - mask1 = -top_bit_of (op1, op1len, prec); + mask0 = -top_bit_of (op0, op0len); + mask1 = -top_bit_of (op1, op1len); /* Subtract all of the explicitly defined elements. */ for (i = 0; i < len; i++) @@ -1541,7 +1520,7 @@ wi::sub_large (HOST_WIDE_INT *val, const } } - + /* Canonize the top of the top block. */ small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); if (small_prec != 0 && BLOCKS_NEEDED (prec) == len) { @@ -1790,14 +1769,14 @@ wi::divmod_internal (HOST_WIDE_INT *quot did. 
*/ if (sgn == SIGNED) { - if (top_bit_of (dividend, dividend_len, dividend_prec)) + if (top_bit_of (dividend, dividend_len)) { dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len, dividend_prec, UNSIGNED, 0); dividend = u0; dividend_neg = true; } - if (top_bit_of (divisor, divisor_len, divisor_prec)) + if (top_bit_of (divisor, divisor_len)) { divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len, divisor_prec, UNSIGNED, 0); @@ -1811,12 +1790,12 @@ wi::divmod_internal (HOST_WIDE_INT *quot wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT*)divisor, divisor_len, divisor_blocks_needed, divisor_prec, sgn); - if (top_bit_of (dividend, dividend_len, dividend_prec) && sgn == SIGNED) + if (top_bit_of (dividend, dividend_len) && sgn == SIGNED) m = dividend_blocks_needed; else m = 2 * dividend_len; - if (top_bit_of (divisor, divisor_len, divisor_prec) && sgn == SIGNED) + if (top_bit_of (divisor, divisor_len) && sgn == SIGNED) n = divisor_blocks_needed; else n = 2 * divisor_len; Index: gcc/wide-int.h =================================================================== --- gcc/wide-int.h (revision 202389) +++ gcc/wide-int.h (working copy) @@ -567,6 +567,8 @@ public: HOST_WIDE_INT elt (unsigned int) const; unsigned HOST_WIDE_INT ulow () const; unsigned HOST_WIDE_INT uhigh () const; + HOST_WIDE_INT slow () const; + HOST_WIDE_INT shigh () const; #define BINARY_PREDICATE(OP, F) \ template \ @@ -682,7 +684,23 @@ generic_wide_int ::sign_mask () return this->get_val ()[this->get_len () - 1] < 0 ? -1 : 0; } -/* Return the value of the least-significant explicitly-encoded block. */ +/* Return the signed value of the least-significant explicitly-encoded block. */ +template +inline HOST_WIDE_INT +generic_wide_int ::slow () const +{ + return this->get_val ()[0]; +} + +/* Return the signed value of the most-significant explicitly-encoded block. 
*/ +template +inline HOST_WIDE_INT +generic_wide_int ::shigh () const +{ + return this->get_val ()[this->get_len () - 1]; +} + +/* Return the unsigned value of the least-significant explicitly-encoded block. */ template inline unsigned HOST_WIDE_INT generic_wide_int ::ulow () const @@ -690,7 +708,7 @@ generic_wide_int ::ulow () cons return this->get_val ()[0]; } -/* Return the value of the most-significant explicitly-encoded block. */ +/* Return the unsigned value of the most-significant explicitly-encoded block. */ template inline unsigned HOST_WIDE_INT generic_wide_int ::uhigh () const @@ -1294,7 +1312,7 @@ decompose (HOST_WIDE_INT *scratch, unsig namespace wi { bool eq_p_large (const HOST_WIDE_INT *, unsigned int, - const HOST_WIDE_INT *, unsigned int, unsigned int); + const HOST_WIDE_INT *, unsigned int); bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int); bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, @@ -1400,9 +1418,9 @@ wi::fits_uhwi_p (const wide_int_ref &x) if (x.precision <= HOST_BITS_PER_WIDE_INT) return true; if (x.len == 1) - return x.sign_mask () == 0; + return x.get_val ()[0] >= 0; if (x.precision < 2 * HOST_BITS_PER_WIDE_INT) - return zext_hwi (x.uhigh (), x.precision % HOST_BITS_PER_WIDE_INT) == 0; + return x.uhigh () == 0; return x.len == 2 && x.uhigh () == 0; } @@ -1415,9 +1433,7 @@ wi::neg_p (const wide_int_ref &x, signop return false; if (x.precision == 0) return false; - if (x.len * HOST_BITS_PER_WIDE_INT > x.precision) - return (x.uhigh () >> (x.precision % HOST_BITS_PER_WIDE_INT - 1)) & 1; - return x.sign_mask () < 0; + return x.shigh () < 0; } /* Return -1 if the top bit of X is set and 0 if the top bit is clear. 
*/ @@ -1438,11 +1454,9 @@ wi::eq_p (const T1 &x, const T2 &y) wide_int_ref xi (x, precision); wide_int_ref yi (y, precision); if (precision <= HOST_BITS_PER_WIDE_INT) - { - unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow (); - return (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0; - } - return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision); + return xi.ulow () == yi.ulow (); + else + return eq_p_large (xi.val, xi.len, yi.val, yi.len); } /* Return true if X != Y. X and Y must be binary-compatible. */ @@ -1459,13 +1473,10 @@ wi::lts_p (const wide_int_ref &x, const { if (x.precision <= HOST_BITS_PER_WIDE_INT && y.precision <= HOST_BITS_PER_WIDE_INT) - { - HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision); - HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision); - return xl < yl; - } - return lts_p_large (x.val, x.len, x.precision, y.val, y.len, - y.precision); + return x.slow () < y.slow (); + else + return lts_p_large (x.val, x.len, x.precision, y.val, y.len, + y.precision); } /* Return true if X < Y when both are treated as unsigned values. */ @@ -1479,7 +1490,8 @@ wi::ltu_p (const wide_int_ref &x, const unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision); return xl < yl; } - return ltu_p_large (x.val, x.len, x.precision, y.val, y.len, y.precision); + else + return ltu_p_large (x.val, x.len, x.precision, y.val, y.len, y.precision); } /* Return true if X < Y. Signedness of X and Y is indicated by SGN. 
*/ @@ -1572,8 +1584,8 @@ wi::cmps (const wide_int_ref &x, const w if (x.precision <= HOST_BITS_PER_WIDE_INT && y.precision <= HOST_BITS_PER_WIDE_INT) { - HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision); - HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision); + HOST_WIDE_INT xl = x.slow (); + HOST_WIDE_INT yl = y.slow (); if (xl < yl) return -1; else if (xl > yl) @@ -1851,7 +1863,7 @@ wi::bit_or (const T1 &x, const T2 &y) unsigned int precision = get_precision (result); wide_int_ref xi (x, precision); wide_int_ref yi (y, precision); - if (precision <= HOST_BITS_PER_WIDE_INT) + if (xi.len + yi.len == 2) { val[0] = xi.ulow () | yi.ulow (); result.set_len (1); @@ -1911,7 +1923,7 @@ wi::add (const T1 &x, const T2 &y) wide_int_ref yi (y, precision); if (precision <= HOST_BITS_PER_WIDE_INT) { - val[0] = xi.ulow () + yi.ulow (); + val[0] = sext_hwi (xi.ulow () + yi.ulow (), precision); result.set_len (1); } else @@ -1942,7 +1954,7 @@ wi::add (const T1 &x, const T2 &y, signo else *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision)) < (xl << (HOST_BITS_PER_WIDE_INT - precision))); - val[0] = resultl; + val[0] = sext_hwi (resultl, precision); result.set_len (1); } else @@ -1962,7 +1974,7 @@ wi::sub (const T1 &x, const T2 &y) wide_int_ref yi (y, precision); if (precision <= HOST_BITS_PER_WIDE_INT) { - val[0] = xi.ulow () - yi.ulow (); + val[0] = sext_hwi (xi.ulow () - yi.ulow (), precision); result.set_len (1); } else @@ -1993,7 +2005,7 @@ wi::sub (const T1 &x, const T2 &y, signo else *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision)) > (xl << (HOST_BITS_PER_WIDE_INT - precision))); - val[0] = resultl; + val[0] = sext_hwi (resultl, precision); result.set_len (1); } else @@ -2013,7 +2025,7 @@ wi::mul (const T1 &x, const T2 &y) wide_int_ref yi (y, precision); if (precision <= HOST_BITS_PER_WIDE_INT) { - val[0] = xi.ulow () * yi.ulow (); + val[0] = sext_hwi (xi.ulow () * yi.ulow (), precision); result.set_len (1); } else @@ -2430,7 +2442,7 @@ wi::lshift 
(const T &x, const wide_int_r } else if (precision <= HOST_BITS_PER_WIDE_INT) { - val[0] = xi.ulow () << shift; + val[0] = sext_hwi (xi.ulow () << shift, precision); result.set_len (1); } else @@ -2485,8 +2497,7 @@ wi::arshift (const T &x, const wide_int_ } else if (xi.precision <= HOST_BITS_PER_WIDE_INT) { - val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift, - xi.precision - shift); + val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift); result.set_len (1); } else