From patchwork Sat Nov 30 10:17:06 2013
X-Patchwork-Submitter: Richard Sandiford
X-Patchwork-Id: 295558
From: Richard Sandiford
To: gcc-patches@gcc.gnu.org
Cc: zadeck@naturalbridge.com, mikestump@comcast.net
Subject: [wide-int] Use __builtin_expect for length checks
Date: Sat, 30 Nov 2013 10:17:06 +0000
Message-ID: <87txeu5h3x.fsf@talisman.default>

Without profiling information, GCC tends to assume "x == 1" and
"x + y == 2" are likely false, so this patch adds some __builtin_expects
to mark those length checks as likely true.  (system.h has a dummy
definition for compilers that don't support __builtin_expect.)

Tested on x86_64-linux-gnu.  OK to install?
Thanks,
Richard


Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h	2013-11-30 09:40:32.710196218 +0000
+++ gcc/wide-int.h	2013-11-30 10:07:06.567433289 +0000
@@ -1675,7 +1675,7 @@ wi::eq_p (const T1 &x, const T2 &y)
 	while (++i != xi.len);
       return true;
     }
-  if (yi.len == 1)
+  if (__builtin_expect (yi.len == 1, true))
     {
       /* XI is only equal to YI if it too has a single HWI.  */
       if (xi.len != 1)
@@ -1751,7 +1751,7 @@ wi::ltu_p (const T1 &x, const T2 &y)
   /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
      for precisions greater than HOST_BITS_WIDE_INT, but sign-extending both
      values does not change the result.  */
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
       unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
@@ -1922,7 +1922,7 @@ wi::cmpu (const T1 &x, const T2 &y)
   /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
      for precisions greater than HOST_BITS_WIDE_INT, but sign-extending both
      values does not change the result.  */
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
       unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
@@ -2128,7 +2128,7 @@ wi::bit_and (const T1 &x, const T2 &y)
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       val[0] = xi.ulow () & yi.ulow ();
       result.set_len (1, is_sign_extended);
@@ -2149,7 +2149,7 @@ wi::bit_and_not (const T1 &x, const T2 &
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       val[0] = xi.ulow () & ~yi.ulow ();
       result.set_len (1, is_sign_extended);
@@ -2170,7 +2170,7 @@ wi::bit_or (const T1 &x, const T2 &y)
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       val[0] = xi.ulow () | yi.ulow ();
       result.set_len (1, is_sign_extended);
@@ -2191,7 +2191,7 @@ wi::bit_or_not (const T1 &x, const T2 &y
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       val[0] = xi.ulow () | ~yi.ulow ();
       result.set_len (1, is_sign_extended);
@@ -2212,7 +2212,7 @@ wi::bit_xor (const T1 &x, const T2 &y)
   WIDE_INT_REF_FOR (T1) xi (x, precision);
   WIDE_INT_REF_FOR (T2) yi (y, precision);
   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
-  if (xi.len + yi.len == 2)
+  if (__builtin_expect (xi.len + yi.len == 2, true))
     {
       val[0] = xi.ulow () ^ yi.ulow ();
       result.set_len (1, is_sign_extended);
@@ -2248,7 +2248,7 @@ wi::add (const T1 &x, const T2 &y)
      HOST_BITS_PER_WIDE_INT are relatively rare and there's not much point
      handling them inline.  */
   else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
-	   && xi.len + yi.len == 2)
+	   && __builtin_expect (xi.len + yi.len == 2, true))
     {
       unsigned HOST_WIDE_INT xl = xi.ulow ();
       unsigned HOST_WIDE_INT yl = yi.ulow ();
@@ -2323,7 +2323,7 @@ wi::sub (const T1 &x, const T2 &y)
      HOST_BITS_PER_WIDE_INT are relatively rare and there's not much point
      handling them inline.  */
   else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
-	   && xi.len + yi.len == 2)
+	   && __builtin_expect (xi.len + yi.len == 2, true))
     {
       unsigned HOST_WIDE_INT xl = xi.ulow ();
       unsigned HOST_WIDE_INT yl = yi.ulow ();
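

As an aside, the pattern used throughout the hunks above can be shown
with a short, self-contained C++ sketch.  This is not part of the patch;
the helper name and the simplified fallback guard are hypothetical, and
gcc/system.h supplies the actual dummy definition mentioned in the cover
note rather than this plain #ifndef:

/* Illustrative sketch only, not part of the patch.  */

#ifndef __GNUC__
/* Dummy definition for host compilers without __builtin_expect,
   in the spirit of the fallback in system.h that the patch relies on.  */
#define __builtin_expect(EXPR, VALUE) (EXPR)
#endif

/* Most wide-int values fit in a single host wide int, so mark the
   len == 1 test as likely.  The hint only influences branch prediction
   and block layout; it never changes the value of the condition.  */
static inline bool
likely_single_hwi_p (unsigned int len)
{
  return __builtin_expect (len == 1, true);
}

With the hint in place, the compiler can lay out the single-HWI fast
path as the fall-through case even when no profile data is available,
which is the point of the length checks changed by the patch.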