get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
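
For illustration, a minimal Python sketch of exercising this endpoint with the requests library. The token value and the "state" field written by the PATCH call are placeholder assumptions: writes require a Patchwork API token for an account with maintainer rights on the project, and the set of valid state slugs varies per instance.

import requests

BASE = "http://patchwork.ozlabs.org/api"

# Show a patch.  GET needs no authentication on a public instance;
# plain clients get JSON by default (the "?format=api" suffix in the
# capture below merely selects the browsable rendering).
patch = requests.get(f"{BASE}/patches/2226770/", timeout=30).json()
print(patch["name"], patch["state"])

# Update a patch.  PUT and PATCH require token authentication;
# PATCH changes only the fields sent in the request body.
token = "0123456789abcdef"  # placeholder, not a real token
resp = requests.patch(
    f"{BASE}/patches/2226770/",
    headers={"Authorization": f"Token {token}"},
    json={"state": "under-review"},  # assumed state slug
    timeout=30,
)
resp.raise_for_status()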

GET /api/patches/2226770/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2226770,
    "url": "http://patchwork.ozlabs.org/api/patches/2226770/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhup4p5wp2.gcc.gcc-TEST.pinskia.84.1.9@forge-stage.sourceware.org/",
    "project": {
        "id": 17,
        "url": "http://patchwork.ozlabs.org/api/projects/17/?format=api",
        "name": "GNU Compiler Collection",
        "link_name": "gcc",
        "list_id": "gcc-patches.gcc.gnu.org",
        "list_email": "gcc-patches@gcc.gnu.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null,
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<bmm.hhup4p5wp2.gcc.gcc-TEST.pinskia.84.1.9@forge-stage.sourceware.org>",
    "list_archive_url": null,
    "date": "2026-04-22T18:49:17",
    "name": "[v1,09/10] fab/isel: Move atomic optimizations to isel from fab [PR121762]",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "13af0332ada070aebc727cf0276c8bdf18242d66",
    "submitter": {
        "id": 93219,
        "url": "http://patchwork.ozlabs.org/api/people/93219/?format=api",
        "name": "Andrew Pinski via Sourceware Forge",
        "email": "forge-bot+pinskia@forge-stage.sourceware.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhup4p5wp2.gcc.gcc-TEST.pinskia.84.1.9@forge-stage.sourceware.org/mbox/",
    "series": [
        {
            "id": 501092,
            "url": "http://patchwork.ozlabs.org/api/series/501092/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/gcc/list/?series=501092",
            "date": "2026-04-22T18:49:11",
            "name": "remove_fab",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/501092/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2226770/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2226770/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@legolas.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Authentication-Results": [
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=gcc.gnu.org\n (client-ip=38.145.34.32; helo=vm01.sourceware.org;\n envelope-from=gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org;\n receiver=patchwork.ozlabs.org)",
            "sourceware.org; dmarc=none (p=none dis=none)\n header.from=forge-stage.sourceware.org",
            "sourceware.org;\n spf=pass smtp.mailfrom=forge-stage.sourceware.org",
            "server2.sourceware.org;\n arc=none smtp.remote-ip=38.145.34.39"
        ],
        "Received": [
            "from vm01.sourceware.org (vm01.sourceware.org [38.145.34.32])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4g18Xw1f9Yz1yD5\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 23 Apr 2026 05:33:52 +1000 (AEST)",
            "from vm01.sourceware.org (localhost [127.0.0.1])\n\tby sourceware.org (Postfix) with ESMTP id 6254842D33EB\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 22 Apr 2026 19:33:50 +0000 (GMT)",
            "from forge-stage.sourceware.org (vm08.sourceware.org [38.145.34.39])\n by sourceware.org (Postfix) with ESMTPS id AEF4C40A2C45\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 18:50:43 +0000 (GMT)",
            "from forge-stage.sourceware.org (localhost [IPv6:::1])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange x25519 server-signature ECDSA (prime256v1) server-digest SHA256)\n (No client certificate requested)\n by forge-stage.sourceware.org (Postfix) with ESMTPS id F026B43598\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 18:50:41 +0000 (UTC)"
        ],
        "DKIM-Filter": [
            "OpenDKIM Filter v2.11.0 sourceware.org 6254842D33EB",
            "OpenDKIM Filter v2.11.0 sourceware.org AEF4C40A2C45"
        ],
        "DMARC-Filter": "OpenDMARC Filter v1.4.2 sourceware.org AEF4C40A2C45",
        "ARC-Filter": "OpenARC Filter v1.0.0 sourceware.org AEF4C40A2C45",
        "ARC-Seal": "i=1; a=rsa-sha256; d=sourceware.org; s=key; t=1776883843; cv=none;\n b=BWpWDgoYkZLdmtRl458KwI1CGrGzukcOoXjQALY6N9Wr8lqHg7RA7vKoKxyM2b85tsUs/Jw+X0LkzpzXrvOUhfK/98mWXRjyUbwgTRxcjVJjeEBdB42EzFBZ6Nb0QF0x51/SeudXxrGbP4emzyoGOXCjULty7FEih7ahIQsSSfg=",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=sourceware.org; s=key;\n t=1776883843; c=relaxed/simple;\n bh=x9/yg3T+VrWsLU/joVOOiNhT1cauc9IpPZ8Lc50xKi0=;\n h=From:Date:Subject:To:Message-ID;\n b=dlucTZZLak1jBfoLo23ZoQD7p/Qj+XU4AHjfpEZAiJPcY8WSALJ0O9YnTQ2bS7/J+6oFPCbUlm0kaqgSoKIFN12WZwD+Zwhllyp9O3p+46hOs2AxQJCYjPUDYGfe0n0Y2Gacu3E8sp/PQZHOBbcN9vMBviZ9D38GL0wp9fn4Zhw=",
        "ARC-Authentication-Results": "i=1; server2.sourceware.org",
        "From": "Andrew Pinski via Sourceware Forge\n <forge-bot+pinskia@forge-stage.sourceware.org>",
        "Date": "Wed, 22 Apr 2026 18:49:17 +0000",
        "Subject": "[PATCH v1 09/10] fab/isel: Move atomic optimizations to isel from fab\n [PR121762]",
        "To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>",
        "Message-ID": "\n <bmm.hhup4p5wp2.gcc.gcc-TEST.pinskia.84.1.9@forge-stage.sourceware.org>",
        "X-Mailer": "batrachomyomachia",
        "X-Pull-Request-Organization": "gcc",
        "X-Pull-Request-Repository": "gcc-TEST",
        "X-Pull-Request": "https://forge.sourceware.org/gcc/gcc-TEST/pulls/84",
        "References": "\n <bmm.hhup4p5wp2.gcc.gcc-TEST.pinskia.84.1.0@forge-stage.sourceware.org>",
        "In-Reply-To": "\n <bmm.hhup4p5wp2.gcc.gcc-TEST.pinskia.84.1.0@forge-stage.sourceware.org>",
        "X-Patch-URL": "\n https://forge.sourceware.org/pinskia/gcc-TEST/commit/84783d181fef8efe58364a6dcd4e1a170f4febd9",
        "X-BeenThere": "gcc-patches@gcc.gnu.org",
        "X-Mailman-Version": "2.1.30",
        "Precedence": "list",
        "List-Id": "Gcc-patches mailing list <gcc-patches.gcc.gnu.org>",
        "List-Unsubscribe": "<https://gcc.gnu.org/mailman/options/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=unsubscribe>",
        "List-Archive": "<https://gcc.gnu.org/pipermail/gcc-patches/>",
        "List-Post": "<mailto:gcc-patches@gcc.gnu.org>",
        "List-Help": "<mailto:gcc-patches-request@gcc.gnu.org?subject=help>",
        "List-Subscribe": "<https://gcc.gnu.org/mailman/listinfo/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=subscribe>",
        "Reply-To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>,\n pinskia@gcc.gnu.org",
        "Errors-To": "gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org"
    },
    "content": "From: Andrew Pinski <andrew.pinski@oss.qualcomm.com>\n\nThese atomic optimizations that are currently in fab are really an\ninstruction selection like optimizations so let's move them to gimple-isel.cc.\n\nNote since this is the last manual optimization left in fab, I have simplified\nthe code to only fold internal and normal builtins. The next patch will remove all\nof fab.\n\nBootstrapped and tested on x86_64-linux-gnu.\n\ngcc/ChangeLog:\n\n\t* gimple-isel.cc (gimple_nop_atomic_bit_test_and_p): New decl.\n\t(gimple_nop_convert): Likewise.\n\t(convert_atomic_bit_not): Moved from tree-ssa-ccp.cc.\n\t(optimize_atomic_bit_test_and): Likewise.\n\t(optimize_atomic_op_fetch_cmp_0): Likewise.\n\t(gimple_isel_builtin_call): New function.\n\t(CASE_ATOMIC): Moved from tree-ssa-ccp.cc.\n\t(CASE_ATOMIC_CMP0): Likewise.\n\t(CASE_ATOMIC_BIT_TEST_AND): Likewise.\n\t(pass_gimple_isel::execute): For calls just call gimple_isel_builtin_call.\n\t* tree-ssa-ccp.cc (convert_atomic_bit_not): Move to gimple-isel.cc.\n\t(gimple_nop_atomic_bit_test_and_p): Likewise.\n\t(gimple_nop_convert): Likewise.\n\t(optimize_atomic_bit_test_and): Likewise.\n\t(optimize_atomic_op_fetch_cmp_0): Likewise.\n\t(pass_fold_builtins::execute): Just call fold_stmt for internal\n\tor normal bultin calls.\n\t(CASE_ATOMIC): Move to gimple-isel.cc.\n\t(CASE_ATOMIC_CMP0): Likewise.\n\t(CASE_ATOMIC_BIT_TEST_AND): Likewise.\n\nSigned-off-by: Andrew Pinski <andrew.pinski@oss.qualcomm.com>\n---\n gcc/gimple-isel.cc  |  956 ++++++++++++++++++++++++++++++++++++++++\n gcc/tree-ssa-ccp.cc | 1005 +------------------------------------------\n 2 files changed, 963 insertions(+), 998 deletions(-)",
    "diff": "diff --git a/gcc/gimple-isel.cc b/gcc/gimple-isel.cc\nindex 0d2efcba547d..b5dc579ff467 100644\n--- a/gcc/gimple-isel.cc\n+++ b/gcc/gimple-isel.cc\n@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3.  If not see\n #include \"optabs.h\"\n #include \"gimple-fold.h\"\n #include \"internal-fn.h\"\n+#include \"fold-const.h\"\n \n /* Expand all ARRAY_REF(VIEW_CONVERT_EXPR) gimple assignments into calls to\n    internal function based on vector type of selected expansion.\n@@ -349,6 +350,15 @@ maybe_duplicate_comparison (gassign *stmt, basic_block bb)\n     }\n }\n \n+/* match.pd function to match atomic_bit_test_and pattern which\n+   has nop_convert:\n+     _1 = __atomic_fetch_or_4 (&v, 1, 0);\n+     _2 = (int) _1;\n+     _5 = _2 & 1;\n+ */\n+extern bool gimple_nop_atomic_bit_test_and_p (tree, tree *,\n+\t\t\t\t\t      tree (*) (tree));\n+extern bool gimple_nop_convert (tree, tree*, tree (*) (tree));\n \n namespace {\n \n@@ -382,6 +392,947 @@ public:\n }; // class pass_gimple_isel\n \n \n+\n+/* Convert\n+   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n+   _7 = ~_1;\n+   _5 = (_Bool) _7;\n+   to\n+   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n+   _8 = _1 & 1;\n+   _5 = _8 == 0;\n+   and convert\n+   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n+   _7 = ~_1;\n+   _4 = (_Bool) _7;\n+   to\n+   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n+   _8 = _1 & 1;\n+   _4 = (_Bool) _8;\n+\n+   USE_STMT is the gimplt statement which uses the return value of\n+   __atomic_fetch_or_*.  LHS is the return value of __atomic_fetch_or_*.\n+   MASK is the mask passed to __atomic_fetch_or_*.\n+ */\n+\n+static gimple *\n+convert_atomic_bit_not (enum internal_fn fn, gimple *use_stmt,\n+\t\t\ttree lhs, tree mask)\n+{\n+  tree and_mask;\n+  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n+    {\n+      /* MASK must be ~1.  */\n+      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),\n+\t\t\t\t\t   ~HOST_WIDE_INT_1), mask, 0))\n+\treturn nullptr;\n+      and_mask = build_int_cst (TREE_TYPE (lhs), 1);\n+    }\n+  else\n+    {\n+      /* MASK must be 1.  */\n+      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs), 1), mask, 0))\n+\treturn nullptr;\n+      and_mask = mask;\n+    }\n+\n+  tree use_lhs = gimple_assign_lhs (use_stmt);\n+\n+  use_operand_p use_p;\n+  gimple *use_not_stmt;\n+\n+  if (!single_imm_use (use_lhs, &use_p, &use_not_stmt)\n+      || !is_gimple_assign (use_not_stmt))\n+    return nullptr;\n+\n+  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt)))\n+    return nullptr;\n+\n+  tree use_not_lhs = gimple_assign_lhs (use_not_stmt);\n+  if (TREE_CODE (TREE_TYPE (use_not_lhs)) != BOOLEAN_TYPE)\n+    return nullptr;\n+\n+  gimple_stmt_iterator gsi;\n+  tree var = make_ssa_name (TREE_TYPE (lhs));\n+  /* use_stmt need to be removed after use_nop_stmt,\n+     so use_lhs can be released.  
*/\n+  gimple *use_stmt_removal = use_stmt;\n+  use_stmt = gimple_build_assign (var, BIT_AND_EXPR, lhs, and_mask);\n+  gsi = gsi_for_stmt (use_not_stmt);\n+  gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);\n+  lhs = gimple_assign_lhs (use_not_stmt);\n+  gimple *g = gimple_build_assign (lhs, EQ_EXPR, var,\n+\t\t\t\t   build_zero_cst (TREE_TYPE (mask)));\n+  gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n+  gsi = gsi_for_stmt (use_not_stmt);\n+  gsi_remove (&gsi, true);\n+  gsi = gsi_for_stmt (use_stmt_removal);\n+  gsi_remove (&gsi, true);\n+  return use_stmt;\n+}\n+\n+/* Optimize\n+     mask_2 = 1 << cnt_1;\n+     _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);\n+     _5 = _4 & mask_2;\n+   to\n+     _4 = .ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);\n+     _5 = _4;\n+   If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1\n+   is passed instead of 0, and the builtin just returns a zero\n+   or 1 value instead of the actual bit.\n+   Similarly for __sync_fetch_and_or_* (without the \", _3\" part\n+   in there), and/or if mask_2 is a power of 2 constant.\n+   Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT\n+   in that case.  And similarly for and instead of or, except that\n+   the second argument to the builtin needs to be one's complement\n+   of the mask instead of mask.  */\n+\n+static bool\n+optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,\n+\t\t\t      enum internal_fn fn, bool has_model_arg,\n+\t\t\t      bool after)\n+{\n+  gimple *call = gsi_stmt (*gsip);\n+  tree lhs = gimple_call_lhs (call);\n+  use_operand_p use_p;\n+  gimple *use_stmt;\n+  tree mask;\n+  optab optab;\n+\n+  if (!flag_inline_atomics\n+      || optimize_debug\n+      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)\n+      || !lhs\n+      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)\n+      || !single_imm_use (lhs, &use_p, &use_stmt)\n+      || !is_gimple_assign (use_stmt)\n+      || !gimple_vdef (call))\n+    return false;\n+\n+  switch (fn)\n+    {\n+    case IFN_ATOMIC_BIT_TEST_AND_SET:\n+      optab = atomic_bit_test_and_set_optab;\n+      break;\n+    case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:\n+      optab = atomic_bit_test_and_complement_optab;\n+      break;\n+    case IFN_ATOMIC_BIT_TEST_AND_RESET:\n+      optab = atomic_bit_test_and_reset_optab;\n+      break;\n+    default:\n+      return false;\n+    }\n+\n+  tree bit = nullptr;\n+\n+  mask = gimple_call_arg (call, 1);\n+  tree_code rhs_code = gimple_assign_rhs_code (use_stmt);\n+  if (rhs_code != BIT_AND_EXPR)\n+    {\n+      if (rhs_code != NOP_EXPR && rhs_code != BIT_NOT_EXPR)\n+\treturn false;\n+\n+      tree use_lhs = gimple_assign_lhs (use_stmt);\n+      if (TREE_CODE (use_lhs) == SSA_NAME\n+\t  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))\n+\treturn false;\n+\n+      tree use_rhs = gimple_assign_rhs1 (use_stmt);\n+      if (lhs != use_rhs)\n+\treturn false;\n+\n+      if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))\n+\t  == CODE_FOR_nothing)\n+\treturn false;\n+\n+      gimple *g;\n+      gimple_stmt_iterator gsi;\n+      tree var;\n+      int ibit = -1;\n+\n+      if (rhs_code == BIT_NOT_EXPR)\n+\t{\n+\t  g = convert_atomic_bit_not (fn, use_stmt, lhs, mask);\n+\t  if (!g)\n+\t    return false;\n+\t  use_stmt = g;\n+\t  ibit = 0;\n+\t}\n+      else if (TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE)\n+\t{\n+\t  tree and_mask;\n+\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n+\t    {\n+\t      /* MASK must be ~1.  
*/\n+\t      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),\n+\t\t\t\t\t\t   ~HOST_WIDE_INT_1),\n+\t\t\t\t    mask, 0))\n+\t\treturn false;\n+\n+\t      /* Convert\n+\t\t _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n+\t\t _4 = (_Bool) _1;\n+\t\t to\n+\t\t _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n+\t\t _5 = _1 & 1;\n+\t\t _4 = (_Bool) _5;\n+\t       */\n+\t      and_mask = build_int_cst (TREE_TYPE (lhs), 1);\n+\t    }\n+\t  else\n+\t    {\n+\t      and_mask = build_int_cst (TREE_TYPE (lhs), 1);\n+\t      if (!operand_equal_p (and_mask, mask, 0))\n+\t\treturn false;\n+\n+\t      /* Convert\n+\t\t _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n+\t\t _4 = (_Bool) _1;\n+\t\t to\n+\t\t _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n+\t\t _5 = _1 & 1;\n+\t\t _4 = (_Bool) _5;\n+\t       */\n+\t    }\n+\t  var = make_ssa_name (TREE_TYPE (use_rhs));\n+\t  replace_uses_by (use_rhs, var);\n+\t  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,\n+\t\t\t\t   and_mask);\n+\t  gsi = gsi_for_stmt (use_stmt);\n+\t  gsi_insert_before (&gsi, g, GSI_NEW_STMT);\n+\t  use_stmt = g;\n+\t  ibit = 0;\n+\t}\n+      else if (TYPE_PRECISION (TREE_TYPE (use_lhs))\n+\t       <= TYPE_PRECISION (TREE_TYPE (use_rhs)))\n+\t{\n+\t  gimple *use_nop_stmt;\n+\t  if (!single_imm_use (use_lhs, &use_p, &use_nop_stmt)\n+\t      || (!is_gimple_assign (use_nop_stmt)\n+\t\t  && gimple_code (use_nop_stmt) != GIMPLE_COND))\n+\t    return false;\n+\t  /* Handle both\n+\t     _4 = _5 < 0;\n+\t     and\n+\t     if (_5 < 0)\n+\t   */\n+\t  tree use_nop_lhs = nullptr;\n+\t  rhs_code = ERROR_MARK;\n+\t  if (is_gimple_assign (use_nop_stmt))\n+\t    {\n+\t      use_nop_lhs = gimple_assign_lhs (use_nop_stmt);\n+\t      rhs_code = gimple_assign_rhs_code (use_nop_stmt);\n+\t    }\n+\t  if (!use_nop_lhs || rhs_code != BIT_AND_EXPR)\n+\t    {\n+\t      /* Also handle\n+\t\t if (_5 < 0)\n+\t       */\n+\t      if (use_nop_lhs\n+\t\t  && TREE_CODE (use_nop_lhs) == SSA_NAME\n+\t\t  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs))\n+\t\treturn false;\n+\t      if (use_nop_lhs && rhs_code == BIT_NOT_EXPR)\n+\t\t{\n+\t\t  /* Handle\n+\t\t     _7 = ~_2;\n+\t\t   */\n+\t\t  g = convert_atomic_bit_not (fn, use_nop_stmt, lhs,\n+\t\t\t\t\t      mask);\n+\t\t  if (!g)\n+\t\t    return false;\n+\t\t  /* Convert\n+\t\t     _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);\n+\t\t     _2 = (int) _1;\n+\t\t     _7 = ~_2;\n+\t\t     _5 = (_Bool) _7;\n+\t\t     to\n+\t\t     _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);\n+\t\t     _8 = _1 & 1;\n+\t\t     _5 = _8 == 0;\n+\t\t     and convert\n+\t\t     _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);\n+\t\t     _2 = (int) _1;\n+\t\t     _7 = ~_2;\n+\t\t     _5 = (_Bool) _7;\n+\t\t     to\n+\t\t     _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);\n+\t\t     _8 = _1 & 1;\n+\t\t     _5 = _8 == 0;\n+\t\t   */\n+\t\t  gsi = gsi_for_stmt (use_stmt);\n+\t\t  gsi_remove (&gsi, true);\n+\t\t  use_stmt = g;\n+\t\t  ibit = 0;\n+\t\t}\n+\t      else\n+\t\t{\n+\t\t  tree cmp_rhs1, cmp_rhs2;\n+\t\t  if (use_nop_lhs)\n+\t\t    {\n+\t\t      /* Handle\n+\t\t\t _4 = _5 < 0;\n+\t\t       */\n+\t\t      if (TREE_CODE (TREE_TYPE (use_nop_lhs))\n+\t\t\t  != BOOLEAN_TYPE)\n+\t\t\treturn false;\n+\t\t      cmp_rhs1 = gimple_assign_rhs1 (use_nop_stmt);\n+\t\t      cmp_rhs2 = gimple_assign_rhs2 (use_nop_stmt);\n+\t\t    }\n+\t\t  else\n+\t\t    {\n+\t\t      /* Handle\n+\t\t\t if (_5 < 0)\n+\t\t       */\n+\t\t      rhs_code = gimple_cond_code (use_nop_stmt);\n+\t\t      cmp_rhs1 = gimple_cond_lhs (use_nop_stmt);\n+\t\t      cmp_rhs2 = gimple_cond_rhs 
(use_nop_stmt);\n+\t\t    }\n+\t\t  if (rhs_code != GE_EXPR && rhs_code != LT_EXPR)\n+\t\t    return false;\n+\t\t  if (use_lhs != cmp_rhs1)\n+\t\t    return false;\n+\t\t  if (!integer_zerop (cmp_rhs2))\n+\t\t    return false;\n+\n+\t\t  tree and_mask;\n+\n+\t\t  unsigned HOST_WIDE_INT bytes\n+\t\t    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs)));\n+\t\t  ibit = bytes * BITS_PER_UNIT - 1;\n+\t\t  unsigned HOST_WIDE_INT highest\n+\t\t    = HOST_WIDE_INT_1U << ibit;\n+\n+\t\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n+\t\t    {\n+\t\t      /* Get the signed maximum of the USE_RHS type.  */\n+\t\t      and_mask = build_int_cst (TREE_TYPE (use_rhs),\n+\t\t\t\t\t\thighest - 1);\n+\t\t      if (!operand_equal_p (and_mask, mask, 0))\n+\t\t\treturn false;\n+\n+\t\t      /* Convert\n+\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n+\t\t\t _5 = (signed int) _1;\n+\t\t\t _4 = _5 < 0 or _5 >= 0;\n+\t\t\t to\n+\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n+\t\t\t _6 = _1 & 0x80000000;\n+\t\t\t _4 = _6 != 0 or _6 == 0;\n+\t\t\t and convert\n+\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n+\t\t\t _5 = (signed int) _1;\n+\t\t\t if (_5 < 0 or _5 >= 0)\n+\t\t\t to\n+\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n+\t\t\t _6 = _1 & 0x80000000;\n+\t\t\t if (_6 != 0 or _6 == 0)\n+\t\t       */\n+\t\t      and_mask = build_int_cst (TREE_TYPE (use_rhs),\n+\t\t\t\t\t\thighest);\n+\t\t    }\n+\t\t  else\n+\t\t    {\n+\t\t      /* Get the signed minimum of the USE_RHS type.  */\n+\t\t      and_mask = build_int_cst (TREE_TYPE (use_rhs),\n+\t\t\t\t\t\thighest);\n+\t\t      if (!operand_equal_p (and_mask, mask, 0))\n+\t\t\treturn false;\n+\n+\t\t      /* Convert\n+\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n+\t\t\t _5 = (signed int) _1;\n+\t\t\t _4 = _5 < 0 or _5 >= 0;\n+\t\t\t to\n+\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n+\t\t\t _6 = _1 & 0x80000000;\n+\t\t\t _4 = _6 != 0 or _6 == 0;\n+\t\t\t and convert\n+\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n+\t\t\t _5 = (signed int) _1;\n+\t\t\t if (_5 < 0 or _5 >= 0)\n+\t\t\t to\n+\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n+\t\t\t _6 = _1 & 0x80000000;\n+\t\t\t if (_6 != 0 or _6 == 0)\n+\t\t       */\n+\t\t    }\n+\t\t  var = make_ssa_name (TREE_TYPE (use_rhs));\n+\t\t  gimple* use_stmt_removal = use_stmt;\n+\t\t  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,\n+\t\t\t\t\t   and_mask);\n+\t\t  gsi = gsi_for_stmt (use_nop_stmt);\n+\t\t  gsi_insert_before (&gsi, g, GSI_NEW_STMT);\n+\t\t  use_stmt = g;\n+\t\t  rhs_code = rhs_code == GE_EXPR ? 
EQ_EXPR : NE_EXPR;\n+\t\t  tree const_zero = build_zero_cst (TREE_TYPE (use_rhs));\n+\t\t  if (use_nop_lhs)\n+\t\t    g = gimple_build_assign (use_nop_lhs, rhs_code,\n+\t\t\t\t\t     var, const_zero);\n+\t\t  else\n+\t\t    g = gimple_build_cond (rhs_code, var, const_zero,\n+\t\t\t\t\t   nullptr, nullptr);\n+\t\t  gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n+\t\t  gsi = gsi_for_stmt (use_nop_stmt);\n+\t\t  gsi_remove (&gsi, true);\n+\t\t  gsi = gsi_for_stmt (use_stmt_removal);\n+\t\t  gsi_remove (&gsi, true);\n+\t\t}\n+\t    }\n+\t  else\n+\t    {\n+\t      tree match_op[3];\n+\t      gimple *g;\n+\t      if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs,\n+\t\t\t\t\t\t     &match_op[0], NULL)\n+\t\t  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op[2])\n+\t\t  || !single_imm_use (match_op[2], &use_p, &g)\n+\t\t  || !is_gimple_assign (g))\n+\t\treturn false;\n+\t      mask = match_op[0];\n+\t      if (TREE_CODE (match_op[1]) == INTEGER_CST)\n+\t\t{\n+\t\t  ibit = tree_log2 (match_op[1]);\n+\t\t  gcc_assert (ibit >= 0);\n+\t\t}\n+\t      else\n+\t\t{\n+\t\t  g = SSA_NAME_DEF_STMT (match_op[1]);\n+\t\t  gcc_assert (is_gimple_assign (g));\n+\t\t  bit = gimple_assign_rhs2 (g);\n+\t\t}\n+\t      /* Convert\n+\t\t _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);\n+\t\t _2 = (int) _1;\n+\t\t _5 = _2 & mask;\n+\t\t to\n+\t\t _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);\n+\t\t _6 = _1 & mask;\n+\t\t _5 = (int) _6;\n+\t\t and convert\n+\t\t _1 = ~mask_7;\n+\t\t _2 = (unsigned int) _1;\n+\t\t _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);\n+\t\t _4 = (int) _3;\n+\t\t _5 = _4 & mask_7;\n+\t\t to\n+\t\t _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);\n+\t\t _12 = _3 & mask_7;\n+\t\t _5 = (int) _12;\n+\n+\t\t and Convert\n+\t\t _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);\n+\t\t _2 = (short int) _1;\n+\t\t _5 = _2 & mask;\n+\t\t to\n+\t\t _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);\n+\t\t _8 = _1 & mask;\n+\t\t _5 = (short int) _8;\n+\t      */\n+\t      gimple_seq stmts = NULL;\n+\t      match_op[1] = gimple_convert (&stmts,\n+\t\t\t\t\t    TREE_TYPE (use_rhs),\n+\t\t\t\t\t    match_op[1]);\n+\t      var = gimple_build (&stmts, BIT_AND_EXPR,\n+\t\t\t\t  TREE_TYPE (use_rhs), use_rhs, match_op[1]);\n+\t      gsi = gsi_for_stmt (use_stmt);\n+\t      gsi_remove (&gsi, true);\n+\t      release_defs (use_stmt);\n+\t      use_stmt = gimple_seq_last_stmt (stmts);\n+\t      gsi = gsi_for_stmt (use_nop_stmt);\n+\t      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);\n+\t      gimple_assign_set_rhs_with_ops (&gsi, CONVERT_EXPR, var);\n+\t      update_stmt (use_nop_stmt);\n+\t    }\n+\t}\n+      else\n+\treturn false;\n+\n+      if (!bit)\n+\t{\n+\t  if (ibit < 0)\n+\t    gcc_unreachable ();\n+\t  bit = build_int_cst (TREE_TYPE (lhs), ibit);\n+\t}\n+    }\n+  else if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))\n+\t   == CODE_FOR_nothing)\n+    return false;\n+\n+  tree use_lhs = gimple_assign_lhs (use_stmt);\n+  if (!use_lhs)\n+    return false;\n+\n+  if (!bit)\n+    {\n+      if (TREE_CODE (mask) == INTEGER_CST)\n+\t{\n+\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n+\t    mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);\n+\t  mask = fold_convert (TREE_TYPE (lhs), mask);\n+\t  int ibit = tree_log2 (mask);\n+\t  if (ibit < 0)\n+\t    return false;\n+\t  bit = build_int_cst (TREE_TYPE (lhs), ibit);\n+\t}\n+      else if (TREE_CODE (mask) == SSA_NAME)\n+\t{\n+\t  gimple *g = SSA_NAME_DEF_STMT (mask);\n+\t  tree match_op;\n+\t  if (gimple_nop_convert (mask, &match_op, NULL))\n+\t    {\n+\t      mask = 
match_op;\n+\t      if (TREE_CODE (mask) != SSA_NAME)\n+\t\treturn false;\n+\t      g = SSA_NAME_DEF_STMT (mask);\n+\t    }\n+\t  if (!is_gimple_assign (g))\n+\t    return false;\n+\n+\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n+\t    {\n+\t      if (gimple_assign_rhs_code (g) != BIT_NOT_EXPR)\n+\t\treturn false;\n+\t      mask = gimple_assign_rhs1 (g);\n+\t      if (TREE_CODE (mask) != SSA_NAME)\n+\t\treturn false;\n+\t      g = SSA_NAME_DEF_STMT (mask);\n+\t    }\n+\n+\t  if (!is_gimple_assign (g)\n+\t      || gimple_assign_rhs_code (g) != LSHIFT_EXPR\n+\t      || !integer_onep (gimple_assign_rhs1 (g)))\n+\t    return false;\n+\t  bit = gimple_assign_rhs2 (g);\n+\t}\n+      else\n+\treturn false;\n+\n+      tree cmp_mask;\n+      if (gimple_assign_rhs1 (use_stmt) == lhs)\n+\tcmp_mask = gimple_assign_rhs2 (use_stmt);\n+      else\n+\tcmp_mask = gimple_assign_rhs1 (use_stmt);\n+\n+      tree match_op;\n+      if (gimple_nop_convert (cmp_mask, &match_op, NULL))\n+\tcmp_mask = match_op;\n+\n+      if (!operand_equal_p (cmp_mask, mask, 0))\n+\treturn false;\n+    }\n+\n+  bool use_bool = true;\n+  bool has_debug_uses = false;\n+  imm_use_iterator iter;\n+  gimple *g;\n+\n+  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))\n+    use_bool = false;\n+  FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)\n+    {\n+      enum tree_code code = ERROR_MARK;\n+      tree op0 = NULL_TREE, op1 = NULL_TREE;\n+      if (is_gimple_debug (g))\n+\t{\n+\t  has_debug_uses = true;\n+\t  continue;\n+\t}\n+      else if (is_gimple_assign (g))\n+\tswitch (gimple_assign_rhs_code (g))\n+\t  {\n+\t  case COND_EXPR:\n+\t    op1 = gimple_assign_rhs1 (g);\n+\t    code = TREE_CODE (op1);\n+\t    if (TREE_CODE_CLASS (code) != tcc_comparison)\n+\t      break;\n+\t    op0 = TREE_OPERAND (op1, 0);\n+\t    op1 = TREE_OPERAND (op1, 1);\n+\t    break;\n+\t  case EQ_EXPR:\n+\t  case NE_EXPR:\n+\t    code = gimple_assign_rhs_code (g);\n+\t    op0 = gimple_assign_rhs1 (g);\n+\t    op1 = gimple_assign_rhs2 (g);\n+\t    break;\n+\t  default:\n+\t    break;\n+\t  }\n+      else if (gimple_code (g) == GIMPLE_COND)\n+\t{\n+\t  code = gimple_cond_code (g);\n+\t  op0 = gimple_cond_lhs (g);\n+\t  op1 = gimple_cond_rhs (g);\n+\t}\n+\n+      if ((code == EQ_EXPR || code == NE_EXPR)\n+\t  && op0 == use_lhs\n+\t  && integer_zerop (op1))\n+\t{\n+\t  use_operand_p use_p;\n+\t  int n = 0;\n+\t  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)\n+\t    n++;\n+\t  if (n == 1)\n+\t    continue;\n+\t}\n+\n+      use_bool = false;\n+      break;\n+    }\n+\n+  tree new_lhs = make_ssa_name (TREE_TYPE (lhs));\n+  tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);\n+  if (has_model_arg)\n+    g = gimple_build_call_internal (fn, 5, gimple_call_arg (call, 0),\n+\t\t\t\t    bit, flag, gimple_call_arg (call, 2),\n+\t\t\t\t    gimple_call_fn (call));\n+  else\n+    g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),\n+\t\t\t\t    bit, flag, gimple_call_fn (call));\n+  gimple_call_set_lhs (g, new_lhs);\n+  gimple_set_location (g, gimple_location (call));\n+  gimple_move_vops (g, call);\n+  bool throws = stmt_can_throw_internal (cfun, call);\n+  gimple_call_set_nothrow (as_a <gcall *> (g),\n+\t\t\t   gimple_call_nothrow_p (as_a <gcall *> (call)));\n+  gimple_stmt_iterator gsi = *gsip;\n+  gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n+  edge e = NULL;\n+  if (throws)\n+    {\n+      maybe_clean_or_replace_eh_stmt (call, g);\n+      if (after || (use_bool && has_debug_uses))\n+\te = find_fallthru_edge (gsi_bb (gsi)->succs);\n+    }\n+  if (after)\n+    {\n+      /* 
The internal function returns the value of the specified bit\n+\t before the atomic operation.  If we are interested in the value\n+\t of the specified bit after the atomic operation (makes only sense\n+\t for xor, otherwise the bit content is compile time known),\n+\t we need to invert the bit.  */\n+      tree mask_convert = mask;\n+      gimple_seq stmts = NULL;\n+      if (!use_bool)\n+\tmask_convert = gimple_convert (&stmts, TREE_TYPE (lhs), mask);\n+      new_lhs = gimple_build (&stmts, BIT_XOR_EXPR, TREE_TYPE (lhs), new_lhs,\n+\t\t\t      use_bool ? build_int_cst (TREE_TYPE (lhs), 1)\n+\t\t\t\t       : mask_convert);\n+      if (throws)\n+\t{\n+\t  gsi_insert_seq_on_edge_immediate (e, stmts);\n+\t  gsi = gsi_for_stmt (gimple_seq_last (stmts));\n+\t}\n+      else\n+\tgsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);\n+    }\n+  if (use_bool && has_debug_uses)\n+    {\n+      tree temp = NULL_TREE;\n+      if (!throws || after || single_pred_p (e->dest))\n+\t{\n+\t  temp = build_debug_expr_decl (TREE_TYPE (lhs));\n+\t  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);\n+\t  g = gimple_build_debug_bind (temp, t, g);\n+\t  if (throws && !after)\n+\t    {\n+\t      gsi = gsi_after_labels (e->dest);\n+\t      gsi_insert_before (&gsi, g, GSI_SAME_STMT);\n+\t    }\n+\t  else\n+\t    gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n+\t}\n+      FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)\n+\tif (is_gimple_debug (g))\n+\t  {\n+\t    use_operand_p use_p;\n+\t    if (temp == NULL_TREE)\n+\t      gimple_debug_bind_reset_value (g);\n+\t    else\n+\t      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)\n+\t\tSET_USE (use_p, temp);\n+\t    update_stmt (g);\n+\t  }\n+    }\n+  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)\n+    = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);\n+  replace_uses_by (use_lhs, new_lhs);\n+  gsi = gsi_for_stmt (use_stmt);\n+  gsi_remove (&gsi, true);\n+  release_defs (use_stmt);\n+  gsi_remove (gsip, true);\n+  release_ssa_name (lhs);\n+  return true;\n+}\n+\n+/* Optimize\n+     _4 = __atomic_add_fetch_* (ptr_6, arg_2, _3);\n+     _5 = _4 == 0;\n+   to\n+     _4 = .ATOMIC_ADD_FETCH_CMP_0 (EQ_EXPR, ptr_6, arg_2, _3);\n+     _5 = _4;\n+   Similarly for __sync_add_and_fetch_* (without the \", _3\" part\n+   in there).  
*/\n+\n+static bool\n+optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator *gsip,\n+\t\t\t\tenum internal_fn fn, bool has_model_arg)\n+{\n+  gimple *call = gsi_stmt (*gsip);\n+  tree lhs = gimple_call_lhs (call);\n+  use_operand_p use_p;\n+  gimple *use_stmt;\n+\n+  if (!flag_inline_atomics\n+      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)\n+      || !lhs\n+      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)\n+      || !single_imm_use (lhs, &use_p, &use_stmt)\n+      || !gimple_vdef (call))\n+    return false;\n+\n+  optab optab;\n+  switch (fn)\n+    {\n+    case IFN_ATOMIC_ADD_FETCH_CMP_0:\n+      optab = atomic_add_fetch_cmp_0_optab;\n+      break;\n+    case IFN_ATOMIC_SUB_FETCH_CMP_0:\n+      optab = atomic_sub_fetch_cmp_0_optab;\n+      break;\n+    case IFN_ATOMIC_AND_FETCH_CMP_0:\n+      optab = atomic_and_fetch_cmp_0_optab;\n+      break;\n+    case IFN_ATOMIC_OR_FETCH_CMP_0:\n+      optab = atomic_or_fetch_cmp_0_optab;\n+      break;\n+    case IFN_ATOMIC_XOR_FETCH_CMP_0:\n+      optab = atomic_xor_fetch_cmp_0_optab;\n+      break;\n+    default:\n+      return false;\n+    }\n+\n+  if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))\n+      == CODE_FOR_nothing)\n+    return false;\n+\n+  tree use_lhs = lhs;\n+  if (gimple_assign_cast_p (use_stmt))\n+    {\n+      use_lhs = gimple_assign_lhs (use_stmt);\n+      if (!tree_nop_conversion_p (TREE_TYPE (use_lhs), TREE_TYPE (lhs))\n+\t  || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))\n+\t      && !POINTER_TYPE_P (TREE_TYPE (use_lhs)))\n+\t  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs)\n+\t  || !single_imm_use (use_lhs, &use_p, &use_stmt))\n+\treturn false;\n+    }\n+  enum tree_code code = ERROR_MARK;\n+  tree op0 = NULL_TREE, op1 = NULL_TREE;\n+  if (is_gimple_assign (use_stmt))\n+    switch (gimple_assign_rhs_code (use_stmt))\n+      {\n+      case COND_EXPR:\n+\top1 = gimple_assign_rhs1 (use_stmt);\n+\tcode = TREE_CODE (op1);\n+\tif (TREE_CODE_CLASS (code) == tcc_comparison)\n+\t  {\n+\t    op0 = TREE_OPERAND (op1, 0);\n+\t    op1 = TREE_OPERAND (op1, 1);\n+\t  }\n+\tbreak;\n+      default:\n+\tcode = gimple_assign_rhs_code (use_stmt);\n+\tif (TREE_CODE_CLASS (code) == tcc_comparison)\n+\t  {\n+\t    op0 = gimple_assign_rhs1 (use_stmt);\n+\t    op1 = gimple_assign_rhs2 (use_stmt);\n+\t  }\n+\tbreak;\n+      }\n+  else if (gimple_code (use_stmt) == GIMPLE_COND)\n+    {\n+      code = gimple_cond_code (use_stmt);\n+      op0 = gimple_cond_lhs (use_stmt);\n+      op1 = gimple_cond_rhs (use_stmt);\n+    }\n+\n+  switch (code)\n+    {\n+    case LT_EXPR:\n+    case LE_EXPR:\n+    case GT_EXPR:\n+    case GE_EXPR:\n+      if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))\n+\t  || TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE\n+\t  || TYPE_UNSIGNED (TREE_TYPE (use_lhs)))\n+\treturn false;\n+      /* FALLTHRU */\n+    case EQ_EXPR:\n+    case NE_EXPR:\n+      if (op0 == use_lhs && integer_zerop (op1))\n+\tbreak;\n+      return false;\n+    default:\n+      return false;\n+    }\n+\n+  int encoded;\n+  switch (code)\n+    {\n+    /* Use special encoding of the operation.  We want to also\n+       encode the mode in the first argument and for neither EQ_EXPR\n+       etc. nor EQ etc. we can rely it will fit into QImode.  
*/\n+    case EQ_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_EQ; break;\n+    case NE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_NE; break;\n+    case LT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LT; break;\n+    case LE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LE; break;\n+    case GT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GT; break;\n+    case GE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GE; break;\n+    default: gcc_unreachable ();\n+    }\n+\n+  tree new_lhs = make_ssa_name (boolean_type_node);\n+  gimple *g;\n+  tree flag = build_int_cst (TREE_TYPE (lhs), encoded);\n+  if (has_model_arg)\n+    g = gimple_build_call_internal (fn, 5, flag,\n+\t\t\t\t    gimple_call_arg (call, 0),\n+\t\t\t\t    gimple_call_arg (call, 1),\n+\t\t\t\t    gimple_call_arg (call, 2),\n+\t\t\t\t    gimple_call_fn (call));\n+  else\n+    g = gimple_build_call_internal (fn, 4, flag,\n+\t\t\t\t    gimple_call_arg (call, 0),\n+\t\t\t\t    gimple_call_arg (call, 1),\n+\t\t\t\t    gimple_call_fn (call));\n+  gimple_call_set_lhs (g, new_lhs);\n+  gimple_set_location (g, gimple_location (call));\n+  gimple_move_vops (g, call);\n+  bool throws = stmt_can_throw_internal (cfun, call);\n+  gimple_call_set_nothrow (as_a <gcall *> (g),\n+\t\t\t   gimple_call_nothrow_p (as_a <gcall *> (call)));\n+  gimple_stmt_iterator gsi = *gsip;\n+  gsi_insert_after (&gsi, g, GSI_SAME_STMT);\n+  if (throws)\n+    maybe_clean_or_replace_eh_stmt (call, g);\n+  if (is_gimple_assign (use_stmt))\n+    switch (gimple_assign_rhs_code (use_stmt))\n+      {\n+      case COND_EXPR:\n+\tgimple_assign_set_rhs1 (use_stmt, new_lhs);\n+\tbreak;\n+      default:\n+\tgsi = gsi_for_stmt (use_stmt);\n+\tif (tree ulhs = gimple_assign_lhs (use_stmt))\n+\t  if (useless_type_conversion_p (TREE_TYPE (ulhs),\n+\t\t\t\t\t boolean_type_node))\n+\t    {\n+\t      gimple_assign_set_rhs_with_ops (&gsi, SSA_NAME, new_lhs);\n+\t      break;\n+\t    }\n+\tgimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, new_lhs);\n+\tbreak;\n+      }\n+  else if (gimple_code (use_stmt) == GIMPLE_COND)\n+    {\n+      gcond *use_cond = as_a <gcond *> (use_stmt);\n+      gimple_cond_set_code (use_cond, NE_EXPR);\n+      gimple_cond_set_lhs (use_cond, new_lhs);\n+      gimple_cond_set_rhs (use_cond, boolean_false_node);\n+    }\n+\n+  update_stmt (use_stmt);\n+  if (use_lhs != lhs)\n+    {\n+      gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (use_lhs));\n+      gsi_remove (&gsi, true);\n+      release_ssa_name (use_lhs);\n+    }\n+  gsi_remove (gsip, true);\n+  release_ssa_name (lhs);\n+  return true;\n+}\n+\n+/* Process builtin CALL located at GSI.\n+   Currently it is only fgr atomic functions optimizations from above. */\n+static void\n+gimple_isel_builtin_call (gcall *call, gimple_stmt_iterator *gsi)\n+{\n+  /* Don't handle these in non optimization mode or optimize debug mode.  
*/\n+  if (!optimize || optimize_debug)\n+    return;\n+\n+  if (!gimple_call_builtin_p (call, BUILT_IN_NORMAL))\n+    return;\n+\n+  tree callee = gimple_call_fndecl (call);\n+  \n+  switch (DECL_FUNCTION_CODE (callee))\n+    {\n+#define CASE_ATOMIC(NAME) \t\t\t\\\n+      case BUILT_IN_##NAME##_1:\t\\\n+      case BUILT_IN_##NAME##_2:\t\\\n+      case BUILT_IN_##NAME##_4:\t\\\n+      case BUILT_IN_##NAME##_8:\t\\\n+      case BUILT_IN_##NAME##_16\n+#define CASE_ATOMIC_CMP0(ATOMIC, SYNC) \t\t\t\t\t\\\n+      CASE_ATOMIC(ATOMIC_##ATOMIC):\t\t\t\t\t\\\n+\toptimize_atomic_op_fetch_cmp_0 (gsi,\t\t\t\t\\\n+\t\t\t\t\tIFN_ATOMIC_##ATOMIC##_CMP_0,\t\\\n+\t\t\t\t\ttrue);\t\t\t\t\\\n+\tbreak;\t\t\t\t\t\t\t\t\\\n+      CASE_ATOMIC(SYNC_##SYNC):\t\t\t\t\t\t\\\n+\toptimize_atomic_op_fetch_cmp_0 (gsi,\t\t\t\t\\\n+\t\t\t\t\tIFN_ATOMIC_##ATOMIC##_CMP_0, \t\\\n+\t\t\t\t\tfalse);\t\t\t\t\\\n+      break;\n+\n+\n+      CASE_ATOMIC_CMP0(ADD_FETCH, ADD_AND_FETCH)\n+      CASE_ATOMIC_CMP0(SUB_FETCH, SUB_AND_FETCH)\n+      CASE_ATOMIC_CMP0(AND_FETCH, AND_AND_FETCH)\n+      CASE_ATOMIC_CMP0(OR_FETCH, OR_AND_FETCH)\n+#define CASE_ATOMIC_BIT_TEST_AND(ATOMIC, SYNC, FN, AFTER) \t\t\\\n+      CASE_ATOMIC(ATOMIC_##ATOMIC):\t\t\t\t\t\\\n+\toptimize_atomic_bit_test_and (gsi,\t\t\t\t\\\n+\t\t\t\t      IFN_ATOMIC_BIT_TEST_AND_##FN,\t\\\n+\t\t\t\t      true, AFTER);\t\t\t\\\n+\tbreak;\t\t\t\t\t\t\t\t\\\n+      CASE_ATOMIC(SYNC_##SYNC):\t\t\t\t\t\t\\\n+\toptimize_atomic_bit_test_and (gsi,\t\t\t\t\\\n+\t\t\t\t      IFN_ATOMIC_BIT_TEST_AND_##FN, \t\\\n+\t\t\t\t      false, AFTER);\t\t\t\\\n+        break;\n+      CASE_ATOMIC_BIT_TEST_AND(FETCH_OR,  FETCH_AND_OR,  SET, false)\n+      CASE_ATOMIC_BIT_TEST_AND(FETCH_XOR, FETCH_AND_XOR, COMPLEMENT, false)\n+      CASE_ATOMIC_BIT_TEST_AND(FETCH_AND, FETCH_AND_AND, RESET, false)\n+\n+      CASE_ATOMIC(ATOMIC_XOR_FETCH):\n+\tif (optimize_atomic_bit_test_and\n+\t     (gsi, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true))\n+\t  break;\n+\toptimize_atomic_op_fetch_cmp_0 (gsi,\n+\t\t\t\t\tIFN_ATOMIC_XOR_FETCH_CMP_0,\n+\t\t\t\t\ttrue);\n+\tbreak;\n+      CASE_ATOMIC(SYNC_XOR_AND_FETCH):\n+\tif (optimize_atomic_bit_test_and\n+\t      (gsi, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true))\n+\t  break;\n+\toptimize_atomic_op_fetch_cmp_0 (gsi,\n+\t\t\t\t\tIFN_ATOMIC_XOR_FETCH_CMP_0,\n+\t\t\t\t\tfalse);\n+        break;\n+\n+      default:;\n+    }\n+}\n+\n /* Iterate all gimple statements and perform pre RTL expansion\n    GIMPLE massaging to improve instruction selection.  
*/\n \n@@ -411,6 +1362,11 @@ pass_gimple_isel::execute (struct function *fun)\n \t  if (gsi_end_p (gsi))\n \t    break;\n \n+\t  if (gcall *call = dyn_cast <gcall*>(*gsi))\n+\t    {\n+\t      gimple_isel_builtin_call (call, &gsi);\n+\t      continue;\n+\t    }\n \t  gassign *stmt = dyn_cast <gassign *> (*gsi);\n \t  if (!stmt)\n \t    continue;\ndiff --git a/gcc/tree-ssa-ccp.cc b/gcc/tree-ssa-ccp.cc\nindex 021eb22eadd6..c884fdfffd01 100644\n--- a/gcc/tree-ssa-ccp.cc\n+++ b/gcc/tree-ssa-ccp.cc\n@@ -3085,882 +3085,6 @@ make_pass_ccp (gcc::context *ctxt)\n   return new pass_ccp (ctxt);\n }\n \n-/* Convert\n-   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n-   _7 = ~_1;\n-   _5 = (_Bool) _7;\n-   to\n-   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n-   _8 = _1 & 1;\n-   _5 = _8 == 0;\n-   and convert\n-   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n-   _7 = ~_1;\n-   _4 = (_Bool) _7;\n-   to\n-   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n-   _8 = _1 & 1;\n-   _4 = (_Bool) _8;\n-\n-   USE_STMT is the gimplt statement which uses the return value of\n-   __atomic_fetch_or_*.  LHS is the return value of __atomic_fetch_or_*.\n-   MASK is the mask passed to __atomic_fetch_or_*.\n- */\n-\n-static gimple *\n-convert_atomic_bit_not (enum internal_fn fn, gimple *use_stmt,\n-\t\t\ttree lhs, tree mask)\n-{\n-  tree and_mask;\n-  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n-    {\n-      /* MASK must be ~1.  */\n-      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),\n-\t\t\t\t\t   ~HOST_WIDE_INT_1), mask, 0))\n-\treturn nullptr;\n-      and_mask = build_int_cst (TREE_TYPE (lhs), 1);\n-    }\n-  else\n-    {\n-      /* MASK must be 1.  */\n-      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs), 1), mask, 0))\n-\treturn nullptr;\n-      and_mask = mask;\n-    }\n-\n-  tree use_lhs = gimple_assign_lhs (use_stmt);\n-\n-  use_operand_p use_p;\n-  gimple *use_not_stmt;\n-\n-  if (!single_imm_use (use_lhs, &use_p, &use_not_stmt)\n-      || !is_gimple_assign (use_not_stmt))\n-    return nullptr;\n-\n-  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt)))\n-    return nullptr;\n-\n-  tree use_not_lhs = gimple_assign_lhs (use_not_stmt);\n-  if (TREE_CODE (TREE_TYPE (use_not_lhs)) != BOOLEAN_TYPE)\n-    return nullptr;\n-\n-  gimple_stmt_iterator gsi;\n-  tree var = make_ssa_name (TREE_TYPE (lhs));\n-  /* use_stmt need to be removed after use_nop_stmt,\n-     so use_lhs can be released.  
*/\n-  gimple *use_stmt_removal = use_stmt;\n-  use_stmt = gimple_build_assign (var, BIT_AND_EXPR, lhs, and_mask);\n-  gsi = gsi_for_stmt (use_not_stmt);\n-  gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);\n-  lhs = gimple_assign_lhs (use_not_stmt);\n-  gimple *g = gimple_build_assign (lhs, EQ_EXPR, var,\n-\t\t\t\t   build_zero_cst (TREE_TYPE (mask)));\n-  gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n-  gsi = gsi_for_stmt (use_not_stmt);\n-  gsi_remove (&gsi, true);\n-  gsi = gsi_for_stmt (use_stmt_removal);\n-  gsi_remove (&gsi, true);\n-  return use_stmt;\n-}\n-\n-/* match.pd function to match atomic_bit_test_and pattern which\n-   has nop_convert:\n-     _1 = __atomic_fetch_or_4 (&v, 1, 0);\n-     _2 = (int) _1;\n-     _5 = _2 & 1;\n- */\n-extern bool gimple_nop_atomic_bit_test_and_p (tree, tree *,\n-\t\t\t\t\t      tree (*) (tree));\n-extern bool gimple_nop_convert (tree, tree*, tree (*) (tree));\n-\n-/* Optimize\n-     mask_2 = 1 << cnt_1;\n-     _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);\n-     _5 = _4 & mask_2;\n-   to\n-     _4 = .ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);\n-     _5 = _4;\n-   If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1\n-   is passed instead of 0, and the builtin just returns a zero\n-   or 1 value instead of the actual bit.\n-   Similarly for __sync_fetch_and_or_* (without the \", _3\" part\n-   in there), and/or if mask_2 is a power of 2 constant.\n-   Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT\n-   in that case.  And similarly for and instead of or, except that\n-   the second argument to the builtin needs to be one's complement\n-   of the mask instead of mask.  */\n-\n-static bool\n-optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,\n-\t\t\t      enum internal_fn fn, bool has_model_arg,\n-\t\t\t      bool after)\n-{\n-  gimple *call = gsi_stmt (*gsip);\n-  tree lhs = gimple_call_lhs (call);\n-  use_operand_p use_p;\n-  gimple *use_stmt;\n-  tree mask;\n-  optab optab;\n-\n-  if (!flag_inline_atomics\n-      || optimize_debug\n-      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)\n-      || !lhs\n-      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)\n-      || !single_imm_use (lhs, &use_p, &use_stmt)\n-      || !is_gimple_assign (use_stmt)\n-      || !gimple_vdef (call))\n-    return false;\n-\n-  switch (fn)\n-    {\n-    case IFN_ATOMIC_BIT_TEST_AND_SET:\n-      optab = atomic_bit_test_and_set_optab;\n-      break;\n-    case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:\n-      optab = atomic_bit_test_and_complement_optab;\n-      break;\n-    case IFN_ATOMIC_BIT_TEST_AND_RESET:\n-      optab = atomic_bit_test_and_reset_optab;\n-      break;\n-    default:\n-      return false;\n-    }\n-\n-  tree bit = nullptr;\n-\n-  mask = gimple_call_arg (call, 1);\n-  tree_code rhs_code = gimple_assign_rhs_code (use_stmt);\n-  if (rhs_code != BIT_AND_EXPR)\n-    {\n-      if (rhs_code != NOP_EXPR && rhs_code != BIT_NOT_EXPR)\n-\treturn false;\n-\n-      tree use_lhs = gimple_assign_lhs (use_stmt);\n-      if (TREE_CODE (use_lhs) == SSA_NAME\n-\t  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))\n-\treturn false;\n-\n-      tree use_rhs = gimple_assign_rhs1 (use_stmt);\n-      if (lhs != use_rhs)\n-\treturn false;\n-\n-      if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))\n-\t  == CODE_FOR_nothing)\n-\treturn false;\n-\n-      gimple *g;\n-      gimple_stmt_iterator gsi;\n-      tree var;\n-      int ibit = -1;\n-\n-      if (rhs_code == BIT_NOT_EXPR)\n-\t{\n-\t  g = convert_atomic_bit_not (fn, use_stmt, lhs, mask);\n-\t  if 
(!g)\n-\t    return false;\n-\t  use_stmt = g;\n-\t  ibit = 0;\n-\t}\n-      else if (TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE)\n-\t{\n-\t  tree and_mask;\n-\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n-\t    {\n-\t      /* MASK must be ~1.  */\n-\t      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),\n-\t\t\t\t\t\t   ~HOST_WIDE_INT_1),\n-\t\t\t\t    mask, 0))\n-\t\treturn false;\n-\n-\t      /* Convert\n-\t\t _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n-\t\t _4 = (_Bool) _1;\n-\t\t to\n-\t\t _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);\n-\t\t _5 = _1 & 1;\n-\t\t _4 = (_Bool) _5;\n-\t       */\n-\t      and_mask = build_int_cst (TREE_TYPE (lhs), 1);\n-\t    }\n-\t  else\n-\t    {\n-\t      and_mask = build_int_cst (TREE_TYPE (lhs), 1);\n-\t      if (!operand_equal_p (and_mask, mask, 0))\n-\t\treturn false;\n-\n-\t      /* Convert\n-\t\t _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n-\t\t _4 = (_Bool) _1;\n-\t\t to\n-\t\t _1 = __atomic_fetch_or_* (ptr_6, 1, _3);\n-\t\t _5 = _1 & 1;\n-\t\t _4 = (_Bool) _5;\n-\t       */\n-\t    }\n-\t  var = make_ssa_name (TREE_TYPE (use_rhs));\n-\t  replace_uses_by (use_rhs, var);\n-\t  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,\n-\t\t\t\t   and_mask);\n-\t  gsi = gsi_for_stmt (use_stmt);\n-\t  gsi_insert_before (&gsi, g, GSI_NEW_STMT);\n-\t  use_stmt = g;\n-\t  ibit = 0;\n-\t}\n-      else if (TYPE_PRECISION (TREE_TYPE (use_lhs))\n-\t       <= TYPE_PRECISION (TREE_TYPE (use_rhs)))\n-\t{\n-\t  gimple *use_nop_stmt;\n-\t  if (!single_imm_use (use_lhs, &use_p, &use_nop_stmt)\n-\t      || (!is_gimple_assign (use_nop_stmt)\n-\t\t  && gimple_code (use_nop_stmt) != GIMPLE_COND))\n-\t    return false;\n-\t  /* Handle both\n-\t     _4 = _5 < 0;\n-\t     and\n-\t     if (_5 < 0)\n-\t   */\n-\t  tree use_nop_lhs = nullptr;\n-\t  rhs_code = ERROR_MARK;\n-\t  if (is_gimple_assign (use_nop_stmt))\n-\t    {\n-\t      use_nop_lhs = gimple_assign_lhs (use_nop_stmt);\n-\t      rhs_code = gimple_assign_rhs_code (use_nop_stmt);\n-\t    }\n-\t  if (!use_nop_lhs || rhs_code != BIT_AND_EXPR)\n-\t    {\n-\t      /* Also handle\n-\t\t if (_5 < 0)\n-\t       */\n-\t      if (use_nop_lhs\n-\t\t  && TREE_CODE (use_nop_lhs) == SSA_NAME\n-\t\t  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs))\n-\t\treturn false;\n-\t      if (use_nop_lhs && rhs_code == BIT_NOT_EXPR)\n-\t\t{\n-\t\t  /* Handle\n-\t\t     _7 = ~_2;\n-\t\t   */\n-\t\t  g = convert_atomic_bit_not (fn, use_nop_stmt, lhs,\n-\t\t\t\t\t      mask);\n-\t\t  if (!g)\n-\t\t    return false;\n-\t\t  /* Convert\n-\t\t     _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);\n-\t\t     _2 = (int) _1;\n-\t\t     _7 = ~_2;\n-\t\t     _5 = (_Bool) _7;\n-\t\t     to\n-\t\t     _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);\n-\t\t     _8 = _1 & 1;\n-\t\t     _5 = _8 == 0;\n-\t\t     and convert\n-\t\t     _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);\n-\t\t     _2 = (int) _1;\n-\t\t     _7 = ~_2;\n-\t\t     _5 = (_Bool) _7;\n-\t\t     to\n-\t\t     _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);\n-\t\t     _8 = _1 & 1;\n-\t\t     _5 = _8 == 0;\n-\t\t   */\n-\t\t  gsi = gsi_for_stmt (use_stmt);\n-\t\t  gsi_remove (&gsi, true);\n-\t\t  use_stmt = g;\n-\t\t  ibit = 0;\n-\t\t}\n-\t      else\n-\t\t{\n-\t\t  tree cmp_rhs1, cmp_rhs2;\n-\t\t  if (use_nop_lhs)\n-\t\t    {\n-\t\t      /* Handle\n-\t\t\t _4 = _5 < 0;\n-\t\t       */\n-\t\t      if (TREE_CODE (TREE_TYPE (use_nop_lhs))\n-\t\t\t  != BOOLEAN_TYPE)\n-\t\t\treturn false;\n-\t\t      cmp_rhs1 = gimple_assign_rhs1 (use_nop_stmt);\n-\t\t      cmp_rhs2 = gimple_assign_rhs2 (use_nop_stmt);\n-\t\t    
}\n-\t\t  else\n-\t\t    {\n-\t\t      /* Handle\n-\t\t\t if (_5 < 0)\n-\t\t       */\n-\t\t      rhs_code = gimple_cond_code (use_nop_stmt);\n-\t\t      cmp_rhs1 = gimple_cond_lhs (use_nop_stmt);\n-\t\t      cmp_rhs2 = gimple_cond_rhs (use_nop_stmt);\n-\t\t    }\n-\t\t  if (rhs_code != GE_EXPR && rhs_code != LT_EXPR)\n-\t\t    return false;\n-\t\t  if (use_lhs != cmp_rhs1)\n-\t\t    return false;\n-\t\t  if (!integer_zerop (cmp_rhs2))\n-\t\t    return false;\n-\n-\t\t  tree and_mask;\n-\n-\t\t  unsigned HOST_WIDE_INT bytes\n-\t\t    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs)));\n-\t\t  ibit = bytes * BITS_PER_UNIT - 1;\n-\t\t  unsigned HOST_WIDE_INT highest\n-\t\t    = HOST_WIDE_INT_1U << ibit;\n-\n-\t\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n-\t\t    {\n-\t\t      /* Get the signed maximum of the USE_RHS type.  */\n-\t\t      and_mask = build_int_cst (TREE_TYPE (use_rhs),\n-\t\t\t\t\t\thighest - 1);\n-\t\t      if (!operand_equal_p (and_mask, mask, 0))\n-\t\t\treturn false;\n-\n-\t\t      /* Convert\n-\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n-\t\t\t _5 = (signed int) _1;\n-\t\t\t _4 = _5 < 0 or _5 >= 0;\n-\t\t\t to\n-\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n-\t\t\t _6 = _1 & 0x80000000;\n-\t\t\t _4 = _6 != 0 or _6 == 0;\n-\t\t\t and convert\n-\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n-\t\t\t _5 = (signed int) _1;\n-\t\t\t if (_5 < 0 or _5 >= 0)\n-\t\t\t to\n-\t\t\t _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);\n-\t\t\t _6 = _1 & 0x80000000;\n-\t\t\t if (_6 != 0 or _6 == 0)\n-\t\t       */\n-\t\t      and_mask = build_int_cst (TREE_TYPE (use_rhs),\n-\t\t\t\t\t\thighest);\n-\t\t    }\n-\t\t  else\n-\t\t    {\n-\t\t      /* Get the signed minimum of the USE_RHS type.  */\n-\t\t      and_mask = build_int_cst (TREE_TYPE (use_rhs),\n-\t\t\t\t\t\thighest);\n-\t\t      if (!operand_equal_p (and_mask, mask, 0))\n-\t\t\treturn false;\n-\n-\t\t      /* Convert\n-\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n-\t\t\t _5 = (signed int) _1;\n-\t\t\t _4 = _5 < 0 or _5 >= 0;\n-\t\t\t to\n-\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n-\t\t\t _6 = _1 & 0x80000000;\n-\t\t\t _4 = _6 != 0 or _6 == 0;\n-\t\t\t and convert\n-\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n-\t\t\t _5 = (signed int) _1;\n-\t\t\t if (_5 < 0 or _5 >= 0)\n-\t\t\t to\n-\t\t\t _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);\n-\t\t\t _6 = _1 & 0x80000000;\n-\t\t\t if (_6 != 0 or _6 == 0)\n-\t\t       */\n-\t\t    }\n-\t\t  var = make_ssa_name (TREE_TYPE (use_rhs));\n-\t\t  gimple* use_stmt_removal = use_stmt;\n-\t\t  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,\n-\t\t\t\t\t   and_mask);\n-\t\t  gsi = gsi_for_stmt (use_nop_stmt);\n-\t\t  gsi_insert_before (&gsi, g, GSI_NEW_STMT);\n-\t\t  use_stmt = g;\n-\t\t  rhs_code = rhs_code == GE_EXPR ? 
EQ_EXPR : NE_EXPR;\n-\t\t  tree const_zero = build_zero_cst (TREE_TYPE (use_rhs));\n-\t\t  if (use_nop_lhs)\n-\t\t    g = gimple_build_assign (use_nop_lhs, rhs_code,\n-\t\t\t\t\t     var, const_zero);\n-\t\t  else\n-\t\t    g = gimple_build_cond (rhs_code, var, const_zero,\n-\t\t\t\t\t   nullptr, nullptr);\n-\t\t  gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n-\t\t  gsi = gsi_for_stmt (use_nop_stmt);\n-\t\t  gsi_remove (&gsi, true);\n-\t\t  gsi = gsi_for_stmt (use_stmt_removal);\n-\t\t  gsi_remove (&gsi, true);\n-\t\t}\n-\t    }\n-\t  else\n-\t    {\n-\t      tree match_op[3];\n-\t      gimple *g;\n-\t      if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs,\n-\t\t\t\t\t\t     &match_op[0], NULL)\n-\t\t  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op[2])\n-\t\t  || !single_imm_use (match_op[2], &use_p, &g)\n-\t\t  || !is_gimple_assign (g))\n-\t\treturn false;\n-\t      mask = match_op[0];\n-\t      if (TREE_CODE (match_op[1]) == INTEGER_CST)\n-\t\t{\n-\t\t  ibit = tree_log2 (match_op[1]);\n-\t\t  gcc_assert (ibit >= 0);\n-\t\t}\n-\t      else\n-\t\t{\n-\t\t  g = SSA_NAME_DEF_STMT (match_op[1]);\n-\t\t  gcc_assert (is_gimple_assign (g));\n-\t\t  bit = gimple_assign_rhs2 (g);\n-\t\t}\n-\t      /* Convert\n-\t\t _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);\n-\t\t _2 = (int) _1;\n-\t\t _5 = _2 & mask;\n-\t\t to\n-\t\t _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);\n-\t\t _6 = _1 & mask;\n-\t\t _5 = (int) _6;\n-\t\t and convert\n-\t\t _1 = ~mask_7;\n-\t\t _2 = (unsigned int) _1;\n-\t\t _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);\n-\t\t _4 = (int) _3;\n-\t\t _5 = _4 & mask_7;\n-\t\t to\n-\t\t _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);\n-\t\t _12 = _3 & mask_7;\n-\t\t _5 = (int) _12;\n-\n-\t\t and Convert\n-\t\t _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);\n-\t\t _2 = (short int) _1;\n-\t\t _5 = _2 & mask;\n-\t\t to\n-\t\t _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);\n-\t\t _8 = _1 & mask;\n-\t\t _5 = (short int) _8;\n-\t      */\n-\t      gimple_seq stmts = NULL;\n-\t      match_op[1] = gimple_convert (&stmts,\n-\t\t\t\t\t    TREE_TYPE (use_rhs),\n-\t\t\t\t\t    match_op[1]);\n-\t      var = gimple_build (&stmts, BIT_AND_EXPR,\n-\t\t\t\t  TREE_TYPE (use_rhs), use_rhs, match_op[1]);\n-\t      gsi = gsi_for_stmt (use_stmt);\n-\t      gsi_remove (&gsi, true);\n-\t      release_defs (use_stmt);\n-\t      use_stmt = gimple_seq_last_stmt (stmts);\n-\t      gsi = gsi_for_stmt (use_nop_stmt);\n-\t      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);\n-\t      gimple_assign_set_rhs_with_ops (&gsi, CONVERT_EXPR, var);\n-\t      update_stmt (use_nop_stmt);\n-\t    }\n-\t}\n-      else\n-\treturn false;\n-\n-      if (!bit)\n-\t{\n-\t  if (ibit < 0)\n-\t    gcc_unreachable ();\n-\t  bit = build_int_cst (TREE_TYPE (lhs), ibit);\n-\t}\n-    }\n-  else if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))\n-\t   == CODE_FOR_nothing)\n-    return false;\n-\n-  tree use_lhs = gimple_assign_lhs (use_stmt);\n-  if (!use_lhs)\n-    return false;\n-\n-  if (!bit)\n-    {\n-      if (TREE_CODE (mask) == INTEGER_CST)\n-\t{\n-\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n-\t    mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);\n-\t  mask = fold_convert (TREE_TYPE (lhs), mask);\n-\t  int ibit = tree_log2 (mask);\n-\t  if (ibit < 0)\n-\t    return false;\n-\t  bit = build_int_cst (TREE_TYPE (lhs), ibit);\n-\t}\n-      else if (TREE_CODE (mask) == SSA_NAME)\n-\t{\n-\t  gimple *g = SSA_NAME_DEF_STMT (mask);\n-\t  tree match_op;\n-\t  if (gimple_nop_convert (mask, &match_op, NULL))\n-\t    {\n-\t      mask = 
match_op;\n-\t      if (TREE_CODE (mask) != SSA_NAME)\n-\t\treturn false;\n-\t      g = SSA_NAME_DEF_STMT (mask);\n-\t    }\n-\t  if (!is_gimple_assign (g))\n-\t    return false;\n-\n-\t  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)\n-\t    {\n-\t      if (gimple_assign_rhs_code (g) != BIT_NOT_EXPR)\n-\t\treturn false;\n-\t      mask = gimple_assign_rhs1 (g);\n-\t      if (TREE_CODE (mask) != SSA_NAME)\n-\t\treturn false;\n-\t      g = SSA_NAME_DEF_STMT (mask);\n-\t    }\n-\n-\t  if (!is_gimple_assign (g)\n-\t      || gimple_assign_rhs_code (g) != LSHIFT_EXPR\n-\t      || !integer_onep (gimple_assign_rhs1 (g)))\n-\t    return false;\n-\t  bit = gimple_assign_rhs2 (g);\n-\t}\n-      else\n-\treturn false;\n-\n-      tree cmp_mask;\n-      if (gimple_assign_rhs1 (use_stmt) == lhs)\n-\tcmp_mask = gimple_assign_rhs2 (use_stmt);\n-      else\n-\tcmp_mask = gimple_assign_rhs1 (use_stmt);\n-\n-      tree match_op;\n-      if (gimple_nop_convert (cmp_mask, &match_op, NULL))\n-\tcmp_mask = match_op;\n-\n-      if (!operand_equal_p (cmp_mask, mask, 0))\n-\treturn false;\n-    }\n-\n-  bool use_bool = true;\n-  bool has_debug_uses = false;\n-  imm_use_iterator iter;\n-  gimple *g;\n-\n-  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))\n-    use_bool = false;\n-  FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)\n-    {\n-      enum tree_code code = ERROR_MARK;\n-      tree op0 = NULL_TREE, op1 = NULL_TREE;\n-      if (is_gimple_debug (g))\n-\t{\n-\t  has_debug_uses = true;\n-\t  continue;\n-\t}\n-      else if (is_gimple_assign (g))\n-\tswitch (gimple_assign_rhs_code (g))\n-\t  {\n-\t  case COND_EXPR:\n-\t    op1 = gimple_assign_rhs1 (g);\n-\t    code = TREE_CODE (op1);\n-\t    if (TREE_CODE_CLASS (code) != tcc_comparison)\n-\t      break;\n-\t    op0 = TREE_OPERAND (op1, 0);\n-\t    op1 = TREE_OPERAND (op1, 1);\n-\t    break;\n-\t  case EQ_EXPR:\n-\t  case NE_EXPR:\n-\t    code = gimple_assign_rhs_code (g);\n-\t    op0 = gimple_assign_rhs1 (g);\n-\t    op1 = gimple_assign_rhs2 (g);\n-\t    break;\n-\t  default:\n-\t    break;\n-\t  }\n-      else if (gimple_code (g) == GIMPLE_COND)\n-\t{\n-\t  code = gimple_cond_code (g);\n-\t  op0 = gimple_cond_lhs (g);\n-\t  op1 = gimple_cond_rhs (g);\n-\t}\n-\n-      if ((code == EQ_EXPR || code == NE_EXPR)\n-\t  && op0 == use_lhs\n-\t  && integer_zerop (op1))\n-\t{\n-\t  use_operand_p use_p;\n-\t  int n = 0;\n-\t  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)\n-\t    n++;\n-\t  if (n == 1)\n-\t    continue;\n-\t}\n-\n-      use_bool = false;\n-      break;\n-    }\n-\n-  tree new_lhs = make_ssa_name (TREE_TYPE (lhs));\n-  tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);\n-  if (has_model_arg)\n-    g = gimple_build_call_internal (fn, 5, gimple_call_arg (call, 0),\n-\t\t\t\t    bit, flag, gimple_call_arg (call, 2),\n-\t\t\t\t    gimple_call_fn (call));\n-  else\n-    g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),\n-\t\t\t\t    bit, flag, gimple_call_fn (call));\n-  gimple_call_set_lhs (g, new_lhs);\n-  gimple_set_location (g, gimple_location (call));\n-  gimple_move_vops (g, call);\n-  bool throws = stmt_can_throw_internal (cfun, call);\n-  gimple_call_set_nothrow (as_a <gcall *> (g),\n-\t\t\t   gimple_call_nothrow_p (as_a <gcall *> (call)));\n-  gimple_stmt_iterator gsi = *gsip;\n-  gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n-  edge e = NULL;\n-  if (throws)\n-    {\n-      maybe_clean_or_replace_eh_stmt (call, g);\n-      if (after || (use_bool && has_debug_uses))\n-\te = find_fallthru_edge (gsi_bb (gsi)->succs);\n-    }\n-  if (after)\n-    {\n-      /* 
The internal function returns the value of the specified bit\n-\t before the atomic operation.  If we are interested in the value\n-\t of the specified bit after the atomic operation (this only makes\n-\t sense for xor; otherwise the bit content is known at compile\n-\t time), we need to invert the bit.  */\n-      tree mask_convert = mask;\n-      gimple_seq stmts = NULL;\n-      if (!use_bool)\n-\tmask_convert = gimple_convert (&stmts, TREE_TYPE (lhs), mask);\n-      new_lhs = gimple_build (&stmts, BIT_XOR_EXPR, TREE_TYPE (lhs), new_lhs,\n-\t\t\t      use_bool ? build_int_cst (TREE_TYPE (lhs), 1)\n-\t\t\t\t       : mask_convert);\n-      if (throws)\n-\t{\n-\t  gsi_insert_seq_on_edge_immediate (e, stmts);\n-\t  gsi = gsi_for_stmt (gimple_seq_last (stmts));\n-\t}\n-      else\n-\tgsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);\n-    }\n-  if (use_bool && has_debug_uses)\n-    {\n-      tree temp = NULL_TREE;\n-      if (!throws || after || single_pred_p (e->dest))\n-\t{\n-\t  temp = build_debug_expr_decl (TREE_TYPE (lhs));\n-\t  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);\n-\t  g = gimple_build_debug_bind (temp, t, g);\n-\t  if (throws && !after)\n-\t    {\n-\t      gsi = gsi_after_labels (e->dest);\n-\t      gsi_insert_before (&gsi, g, GSI_SAME_STMT);\n-\t    }\n-\t  else\n-\t    gsi_insert_after (&gsi, g, GSI_NEW_STMT);\n-\t}\n-      FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)\n-\tif (is_gimple_debug (g))\n-\t  {\n-\t    use_operand_p use_p;\n-\t    if (temp == NULL_TREE)\n-\t      gimple_debug_bind_reset_value (g);\n-\t    else\n-\t      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)\n-\t\tSET_USE (use_p, temp);\n-\t    update_stmt (g);\n-\t  }\n-    }\n-  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)\n-    = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);\n-  replace_uses_by (use_lhs, new_lhs);\n-  gsi = gsi_for_stmt (use_stmt);\n-  gsi_remove (&gsi, true);\n-  release_defs (use_stmt);\n-  gsi_remove (gsip, true);\n-  release_ssa_name (lhs);\n-  return true;\n-}\n-\n-/* Optimize\n-     _4 = __atomic_add_fetch_* (ptr_6, arg_2, _3);\n-     _5 = _4 == 0;\n-   to\n-     _4 = .ATOMIC_ADD_FETCH_CMP_0 (EQ_EXPR, ptr_6, arg_2, _3);\n-     _5 = _4;\n-   Similarly for __sync_add_and_fetch_* (without the \", _3\" part\n-   in there).  
*/\n-\n-static bool\n-optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator *gsip,\n-\t\t\t\tenum internal_fn fn, bool has_model_arg)\n-{\n-  gimple *call = gsi_stmt (*gsip);\n-  tree lhs = gimple_call_lhs (call);\n-  use_operand_p use_p;\n-  gimple *use_stmt;\n-\n-  if (!flag_inline_atomics\n-      || optimize_debug\n-      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)\n-      || !lhs\n-      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)\n-      || !single_imm_use (lhs, &use_p, &use_stmt)\n-      || !gimple_vdef (call))\n-    return false;\n-\n-  optab optab;\n-  switch (fn)\n-    {\n-    case IFN_ATOMIC_ADD_FETCH_CMP_0:\n-      optab = atomic_add_fetch_cmp_0_optab;\n-      break;\n-    case IFN_ATOMIC_SUB_FETCH_CMP_0:\n-      optab = atomic_sub_fetch_cmp_0_optab;\n-      break;\n-    case IFN_ATOMIC_AND_FETCH_CMP_0:\n-      optab = atomic_and_fetch_cmp_0_optab;\n-      break;\n-    case IFN_ATOMIC_OR_FETCH_CMP_0:\n-      optab = atomic_or_fetch_cmp_0_optab;\n-      break;\n-    case IFN_ATOMIC_XOR_FETCH_CMP_0:\n-      optab = atomic_xor_fetch_cmp_0_optab;\n-      break;\n-    default:\n-      return false;\n-    }\n-\n-  if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))\n-      == CODE_FOR_nothing)\n-    return false;\n-\n-  tree use_lhs = lhs;\n-  if (gimple_assign_cast_p (use_stmt))\n-    {\n-      use_lhs = gimple_assign_lhs (use_stmt);\n-      if (!tree_nop_conversion_p (TREE_TYPE (use_lhs), TREE_TYPE (lhs))\n-\t  || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))\n-\t      && !POINTER_TYPE_P (TREE_TYPE (use_lhs)))\n-\t  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs)\n-\t  || !single_imm_use (use_lhs, &use_p, &use_stmt))\n-\treturn false;\n-    }\n-  enum tree_code code = ERROR_MARK;\n-  tree op0 = NULL_TREE, op1 = NULL_TREE;\n-  if (is_gimple_assign (use_stmt))\n-    switch (gimple_assign_rhs_code (use_stmt))\n-      {\n-      case COND_EXPR:\n-\top1 = gimple_assign_rhs1 (use_stmt);\n-\tcode = TREE_CODE (op1);\n-\tif (TREE_CODE_CLASS (code) == tcc_comparison)\n-\t  {\n-\t    op0 = TREE_OPERAND (op1, 0);\n-\t    op1 = TREE_OPERAND (op1, 1);\n-\t  }\n-\tbreak;\n-      default:\n-\tcode = gimple_assign_rhs_code (use_stmt);\n-\tif (TREE_CODE_CLASS (code) == tcc_comparison)\n-\t  {\n-\t    op0 = gimple_assign_rhs1 (use_stmt);\n-\t    op1 = gimple_assign_rhs2 (use_stmt);\n-\t  }\n-\tbreak;\n-      }\n-  else if (gimple_code (use_stmt) == GIMPLE_COND)\n-    {\n-      code = gimple_cond_code (use_stmt);\n-      op0 = gimple_cond_lhs (use_stmt);\n-      op1 = gimple_cond_rhs (use_stmt);\n-    }\n-\n-  switch (code)\n-    {\n-    case LT_EXPR:\n-    case LE_EXPR:\n-    case GT_EXPR:\n-    case GE_EXPR:\n-      if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))\n-\t  || TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE\n-\t  || TYPE_UNSIGNED (TREE_TYPE (use_lhs)))\n-\treturn false;\n-      /* FALLTHRU */\n-    case EQ_EXPR:\n-    case NE_EXPR:\n-      if (op0 == use_lhs && integer_zerop (op1))\n-\tbreak;\n-      return false;\n-    default:\n-      return false;\n-    }\n-\n-  int encoded;\n-  switch (code)\n-    {\n-    /* Use special encoding of the operation.  We want to also\n-       encode the mode in the first argument, and for neither EQ_EXPR\n-       etc. nor EQ etc. can we rely on it fitting into QImode.  
*/\n-    case EQ_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_EQ; break;\n-    case NE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_NE; break;\n-    case LT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LT; break;\n-    case LE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LE; break;\n-    case GT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GT; break;\n-    case GE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GE; break;\n-    default: gcc_unreachable ();\n-    }\n-\n-  tree new_lhs = make_ssa_name (boolean_type_node);\n-  gimple *g;\n-  tree flag = build_int_cst (TREE_TYPE (lhs), encoded);\n-  if (has_model_arg)\n-    g = gimple_build_call_internal (fn, 5, flag,\n-\t\t\t\t    gimple_call_arg (call, 0),\n-\t\t\t\t    gimple_call_arg (call, 1),\n-\t\t\t\t    gimple_call_arg (call, 2),\n-\t\t\t\t    gimple_call_fn (call));\n-  else\n-    g = gimple_build_call_internal (fn, 4, flag,\n-\t\t\t\t    gimple_call_arg (call, 0),\n-\t\t\t\t    gimple_call_arg (call, 1),\n-\t\t\t\t    gimple_call_fn (call));\n-  gimple_call_set_lhs (g, new_lhs);\n-  gimple_set_location (g, gimple_location (call));\n-  gimple_move_vops (g, call);\n-  bool throws = stmt_can_throw_internal (cfun, call);\n-  gimple_call_set_nothrow (as_a <gcall *> (g),\n-\t\t\t   gimple_call_nothrow_p (as_a <gcall *> (call)));\n-  gimple_stmt_iterator gsi = *gsip;\n-  gsi_insert_after (&gsi, g, GSI_SAME_STMT);\n-  if (throws)\n-    maybe_clean_or_replace_eh_stmt (call, g);\n-  if (is_gimple_assign (use_stmt))\n-    switch (gimple_assign_rhs_code (use_stmt))\n-      {\n-      case COND_EXPR:\n-\tgimple_assign_set_rhs1 (use_stmt, new_lhs);\n-\tbreak;\n-      default:\n-\tgsi = gsi_for_stmt (use_stmt);\n-\tif (tree ulhs = gimple_assign_lhs (use_stmt))\n-\t  if (useless_type_conversion_p (TREE_TYPE (ulhs),\n-\t\t\t\t\t boolean_type_node))\n-\t    {\n-\t      gimple_assign_set_rhs_with_ops (&gsi, SSA_NAME, new_lhs);\n-\t      break;\n-\t    }\n-\tgimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, new_lhs);\n-\tbreak;\n-      }\n-  else if (gimple_code (use_stmt) == GIMPLE_COND)\n-    {\n-      gcond *use_cond = as_a <gcond *> (use_stmt);\n-      gimple_cond_set_code (use_cond, NE_EXPR);\n-      gimple_cond_set_lhs (use_cond, new_lhs);\n-      gimple_cond_set_rhs (use_cond, boolean_false_node);\n-    }\n-\n-  update_stmt (use_stmt);\n-  if (use_lhs != lhs)\n-    {\n-      gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (use_lhs));\n-      gsi_remove (&gsi, true);\n-      release_ssa_name (use_lhs);\n-    }\n-  gsi_remove (gsip, true);\n-  release_ssa_name (lhs);\n-  return true;\n-}\n-\n /* A simple pass that attempts to fold all builtin functions.  This pass\n    is run after we've propagated as many constants as we can.  */\n \n@@ -4008,8 +3132,6 @@ pass_fold_builtins::execute (function *fun)\n       for (i = gsi_start_bb (bb); !gsi_end_p (i); )\n \t{\n \t  gimple *stmt, *old_stmt;\n-\t  tree callee;\n-\t  enum built_in_function fcode;\n \n \t  stmt = gsi_stmt (i);\n \n@@ -4019,128 +3141,26 @@ pass_fold_builtins::execute (function *fun)\n \t      continue;\n \t    }\n \n-\t  callee = gimple_call_fndecl (stmt);\n-\t  if (!callee\n-\t      && gimple_call_internal_p (stmt))\n+\t  /* Only fold internal calls\n+\t     or normal builtins. 
*/\n+\t  if (!gimple_call_internal_p (stmt)\n+\t      && !gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))\n \t    {\n-\t      if (!fold_stmt (&i))\n-\t\t{\n-\t\t  gsi_next (&i);\n-\t\t  continue;\n-\t\t}\n-\t      if (dump_file && (dump_flags & TDF_DETAILS))\n-\t\t{\n-\t\t  fprintf (dump_file, \"Simplified\\n  \");\n-\t\t  print_gimple_stmt (dump_file, stmt, 0, dump_flags);\n-\t\t}\n-\n-\t      old_stmt = stmt;\n-\t      stmt = gsi_stmt (i);\n-\t      update_stmt (stmt);\n-\n-\t      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)\n-\t\t  && gimple_purge_dead_eh_edges (bb))\n-\t\tcfg_changed = true;\n-\n-\t      if (dump_file && (dump_flags & TDF_DETAILS))\n-\t\t{\n-\t\t  fprintf (dump_file, \"to\\n  \");\n-\t\t  print_gimple_stmt (dump_file, stmt, 0, dump_flags);\n-\t\t  fprintf (dump_file, \"\\n\");\n-\t\t}\n \t      gsi_next (&i);\n \t      continue;\n \t    }\n-\t  if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))\n+\t  if (!fold_stmt (&i))\n \t    {\n \t      gsi_next (&i);\n \t      continue;\n \t    }\n-\n-\t  fcode = DECL_FUNCTION_CODE (callee);\n-\t  if (fold_stmt (&i))\n-\t    ;\n-\t  else\n-\t    {\n-\t      tree result = NULL_TREE;\n-\t      switch (DECL_FUNCTION_CODE (callee))\n-\t\t{\n-#define CASE_ATOMIC(NAME) \t\t\t\\\n-\t\tcase BUILT_IN_##NAME##_1:\t\\\n-\t\tcase BUILT_IN_##NAME##_2:\t\\\n-\t\tcase BUILT_IN_##NAME##_4:\t\\\n-\t\tcase BUILT_IN_##NAME##_8:\t\\\n-\t\tcase BUILT_IN_##NAME##_16\n-#define CASE_ATOMIC_CMP0(ATOMIC, SYNC) \t\t\t\t\\\n-\t\tCASE_ATOMIC(ATOMIC_##ATOMIC):\t\t\t\\\n-\t\t  optimize_atomic_op_fetch_cmp_0 (&i,\t\t\\\n-\t\t\t\t\t\t  IFN_ATOMIC_##ATOMIC##_CMP_0, \\\n-\t\t\t\t\t\t  true);\t\\\n-\t\t  break;\t\t\t\t\t\\\n-\t\tCASE_ATOMIC(SYNC_##SYNC):\t\t\t\\\n-\t\t  optimize_atomic_op_fetch_cmp_0 (&i,\t\t\\\n-\t\t\t\t\t\t  IFN_ATOMIC_##ATOMIC##_CMP_0, \\\n-\t\t\t\t\t\t  false);\t\\\n-\t\t  break;\n-\n-\n-\t\tCASE_ATOMIC_CMP0(ADD_FETCH, ADD_AND_FETCH)\n-\t\tCASE_ATOMIC_CMP0(SUB_FETCH, SUB_AND_FETCH)\n-\t\tCASE_ATOMIC_CMP0(AND_FETCH, AND_AND_FETCH)\n-\t\tCASE_ATOMIC_CMP0(OR_FETCH, OR_AND_FETCH)\n-#define CASE_ATOMIC_BIT_TEST_AND(ATOMIC, SYNC, FN, AFTER) \t\t\t\\\n-\t\tCASE_ATOMIC(ATOMIC_##ATOMIC):\t\t\t\t\t\\\n-\t\t  optimize_atomic_bit_test_and (&i,\t\t\t\t\\\n-\t\t\t\t\t\tIFN_ATOMIC_BIT_TEST_AND_##FN,\t\\\n-\t\t\t\t\t\ttrue, AFTER);\t\t\t\\\n-\t\t  break;\t\t\t\t\t\t\t\\\n-\t\tCASE_ATOMIC(SYNC_##SYNC):\t\t\t\t\t\\\n-\t\t  optimize_atomic_bit_test_and (&i,\t\t\t\t\\\n-\t\t\t\t\t\tIFN_ATOMIC_BIT_TEST_AND_##FN, \t\\\n-\t\t\t\t\t\tfalse, AFTER);\t\t\t\\\n-\t\t  break;\n-\t\tCASE_ATOMIC_BIT_TEST_AND(FETCH_OR,  FETCH_AND_OR,  SET, false)\n-\t\tCASE_ATOMIC_BIT_TEST_AND(FETCH_XOR, FETCH_AND_XOR, COMPLEMENT, false)\n-\t\tCASE_ATOMIC_BIT_TEST_AND(FETCH_AND, FETCH_AND_AND, RESET, false)\n-\n-\t\tCASE_ATOMIC(ATOMIC_XOR_FETCH):\n-\t\t  if (optimize_atomic_bit_test_and\n-\t\t\t(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true))\n-\t\t    break;\n-\t\t  optimize_atomic_op_fetch_cmp_0 (&i,\n-\t\t\t\t\t\t  IFN_ATOMIC_XOR_FETCH_CMP_0,\n-\t\t\t\t\t\t  true);\n-\t\t  break;\n-\t\tCASE_ATOMIC(SYNC_XOR_AND_FETCH):\n-\t\t  if (optimize_atomic_bit_test_and\n-\t\t\t(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true))\n-\t\t    break;\n-\t\t  optimize_atomic_op_fetch_cmp_0 (&i,\n-\t\t\t\t\t\t  IFN_ATOMIC_XOR_FETCH_CMP_0,\n-\t\t\t\t\t\t  false);\n-\t\t  break;\n-\n-\t\tdefault:;\n-\t\t}\n-\n-\t      if (!result)\n-\t\t{\n-\t\t  gsi_next (&i);\n-\t\t  continue;\n-\t\t}\n-\n-\t      gimplify_and_update_call_from_tree (&i, result);\n-\t    }\n-\n-\t  todoflags |= 
TODO_update_address_taken;\n-\n \t  if (dump_file && (dump_flags & TDF_DETAILS))\n \t    {\n \t      fprintf (dump_file, \"Simplified\\n  \");\n \t      print_gimple_stmt (dump_file, stmt, 0, dump_flags);\n \t    }\n \n-          old_stmt = stmt;\n+\t  old_stmt = stmt;\n \t  stmt = gsi_stmt (i);\n \t  update_stmt (stmt);\n \n@@ -4154,18 +3174,7 @@ pass_fold_builtins::execute (function *fun)\n \t      print_gimple_stmt (dump_file, stmt, 0, dump_flags);\n \t      fprintf (dump_file, \"\\n\");\n \t    }\n-\n-\t  /* Retry the same statement if it changed into another\n-\t     builtin, there might be new opportunities now.  */\n-          if (gimple_code (stmt) != GIMPLE_CALL)\n-\t    {\n-\t      gsi_next (&i);\n-\t      continue;\n-\t    }\n-\t  callee = gimple_call_fndecl (stmt);\n-\t  if (!callee\n-\t      || !fndecl_built_in_p (callee, fcode))\n-\t    gsi_next (&i);\n+\t  gsi_next (&i);\n \t}\n     }\n \n",
    "prefixes": [
        "v1",
        "09/10"
    ]
}