From patchwork Wed Jan 8 14:28:38 2020
X-Patchwork-Submitter: Richard Biener
X-Patchwork-Id: 1219703
Date: Wed, 8 Jan 2020 15:28:38 +0100 (CET)
From: Richard Biener
To: gcc-patches@gcc.gnu.org
Subject: [PATCH] Make sinking clobbers across EH reliable

This makes $subject reliably catch secondary opportunities (the ones that
cause the quadratic behavior in PR93199).  It also makes virtual operand
updating in this process a bit cheaper.

This is a first step; a second step will address the quadratic behavior
itself, either by some algorithmic changes or by capping the number of
clobbers to sink if the former turns out too ugly.
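To illustrate the pattern (this example is mine, not part of the patch or of
the PR93199 testcase): nested EH cleanups are the typical source of the
clobber-before-rethrow sequences that sink_clobbers handles.

  /* Illustration only.  The EH cleanup region for each of c, b and a ends
     in GIMPLE roughly like

       c ={v} {CLOBBER};
       resx N;

     i.e. clobber stmts followed by an internal rethrow, which is the
     pattern sink_clobbers () moves into the successor block.  Sinking the
     inner region's clobbers exposes the same pattern in the next region --
     those are the secondary opportunities the patch now catches reliably
     by doing the sinking in RPO order after CFG cleanup.  */

  struct S { S (); ~S (); };
  void bar ();               /* may throw */

  void
  foo ()
  {
    S a, b, c;               /* three nested cleanup regions */
    bar ();                  /* unwinding runs ~S () for c, b, a in turn */
  }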
Bootstrapped and tested on x86_64-unknown-linux-gnu, applied to trunk.

Richard.

2020-01-08  Richard Biener

        PR middle-end/93199
        * tree-eh.c (sink_clobbers): Update virtual operands for
        the first and last stmt only.  Add a dry-run capability.
        (pass_lower_eh_dispatch::execute): Perform clobber sinking
        after CFG manipulations and in RPO order to catch all
        secondary opportunities reliably.

Index: gcc/tree-eh.c
===================================================================
--- gcc/tree-eh.c	(revision 280000)
+++ gcc/tree-eh.c	(working copy)
@@ -3550,10 +3550,11 @@ optimize_clobbers (basic_block bb)
 }
 
 /* Try to sink var = {v} {CLOBBER} stmts followed just by
-   internal throw to successor BB.  */
+   internal throw to successor BB.  If FOUND_OPPORTUNITY is not NULL
+   then do not perform the optimization but set *FOUND_OPPORTUNITY to true.  */
 
 static int
-sink_clobbers (basic_block bb)
+sink_clobbers (basic_block bb, bool *found_opportunity = NULL)
 {
   edge e;
   edge_iterator ei;
@@ -3591,13 +3592,19 @@ sink_clobbers (basic_block bb)
   if (!any_clobbers)
     return 0;
 
+  /* If this was a dry run, tell it we found clobbers to sink.  */
+  if (found_opportunity)
+    {
+      *found_opportunity = true;
+      return 0;
+    }
+
   edge succe = single_succ_edge (bb);
   succbb = succe->dest;
 
   /* See if there is a virtual PHI node to take an updated virtual
      operand from.  */
   gphi *vphi = NULL;
-  tree vuse = NULL_TREE;
   for (gphi_iterator gpi = gsi_start_phis (succbb);
        !gsi_end_p (gpi); gsi_next (&gpi))
     {
@@ -3605,11 +3612,12 @@ sink_clobbers (basic_block bb)
       if (virtual_operand_p (res))
         {
           vphi = gpi.phi ();
-          vuse = res;
           break;
         }
     }
 
+  gimple *first_sunk = NULL;
+  gimple *last_sunk = NULL;
   dgsi = gsi_after_labels (succbb);
   gsi = gsi_last_bb (bb);
   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
@@ -3641,36 +3649,37 @@ sink_clobbers (basic_block bb)
          forwarder edge we can keep virtual operands in place.  */
       gsi_remove (&gsi, false);
       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
-
-      /* But adjust virtual operands if we sunk across a PHI node.  */
-      if (vuse)
+      if (!first_sunk)
+        first_sunk = stmt;
+      last_sunk = stmt;
+    }
+  if (first_sunk)
+    {
+      /* Adjust virtual operands if we sunk across a virtual PHI.  */
+      if (vphi)
         {
-          gimple *use_stmt;
           imm_use_iterator iter;
           use_operand_p use_p;
-          FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
+          gimple *use_stmt;
+          tree phi_def = gimple_phi_result (vphi);
+          FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
             FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
-              SET_USE (use_p, gimple_vdef (stmt));
-          if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
+              SET_USE (use_p, gimple_vdef (first_sunk));
+          if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
             {
-              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
-              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
+              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
+              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
             }
-          /* Adjust the incoming virtual operand.  */
-          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
-          SET_USE (gimple_vuse_op (stmt), vuse);
+          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
+                   gimple_vuse (last_sunk));
+          SET_USE (gimple_vuse_op (last_sunk), phi_def);
         }
       /* If there isn't a single predecessor but no virtual PHI node
          arrange for virtual operands to be renamed.  */
-      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
-               && !single_pred_p (succbb))
+      else if (!single_pred_p (succbb)
+               && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
         {
-          /* In this case there will be no use of the VDEF of this stmt.
-             ??? Unless this is a secondary opportunity and we have not
-             removed unreachable blocks yet, so we cannot assert this.
-             Which also means we will end up renaming too many times.  */
-          SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
-          mark_virtual_operands_for_renaming (cfun);
+          mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
           todo |= TODO_update_ssa_only_virtuals;
         }
     }
@@ -3863,6 +3872,7 @@ pass_lower_eh_dispatch::execute (functio
   basic_block bb;
   int flags = 0;
   bool redirected = false;
+  bool any_resx_to_process = false;
 
   assign_filter_values ();
 
@@ -3879,18 +3889,37 @@ pass_lower_eh_dispatch::execute (functio
         }
       else if (gimple_code (last) == GIMPLE_RESX)
         {
-          if (stmt_can_throw_external (cfun, last))
+          if (stmt_can_throw_external (fun, last))
             optimize_clobbers (bb);
-          else
-            flags |= sink_clobbers (bb);
+          else if (!any_resx_to_process)
+            sink_clobbers (bb, &any_resx_to_process);
         }
     }
-
   if (redirected)
     {
       free_dominance_info (CDI_DOMINATORS);
       delete_unreachable_blocks ();
    }
+
+  if (any_resx_to_process)
+    {
+      /* Make sure to catch all secondary sinking opportunities by processing
+         blocks in RPO order and after all CFG modifications from lowering
+         and unreachable block removal.  */
+      int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
+      int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
+      for (int i = 0; i < rpo_n; ++i)
+        {
+          bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
+          gimple *last = last_stmt (bb);
+          if (last
+              && gimple_code (last) == GIMPLE_RESX
+              && !stmt_can_throw_external (fun, last))
+            flags |= sink_clobbers (bb);
+        }
+      free (rpo);
+    }
+
   return flags;
 }
 
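For readers following along, here is a condensed sketch (not GCC code; the
types and helpers below are simplified stand-ins) of the two-phase control
flow the patched pass_lower_eh_dispatch::execute uses: the lowering walk only
records that some internal-rethrow block has clobbers to sink, and the actual
sinking happens afterwards in one reverse post-order (RPO) walk, once
redirections and unreachable-block removal are done, so every secondary
opportunity is visited after the blocks feeding it.

  #include <vector>

  struct block
  {
    bool ends_in_internal_rethrow;   /* a GIMPLE_RESX that stays in the fn */
    bool has_clobbers_before_throw;  /* var = {v} {CLOBBER} stmts before it */
  };

  /* Dry run when found_opportunity is non-null: only report, do not
     transform.  Mirrors the new sink_clobbers interface in spirit only.  */
  static int
  sink_clobbers_sketch (block &b, bool *found_opportunity = nullptr)
  {
    if (!b.has_clobbers_before_throw)
      return 0;
    if (found_opportunity)
      {
        *found_opportunity = true;
        return 0;
      }
    /* ... move the clobbers into the successor block here ...  */
    return 1;  /* stand-in for the returned TODO flags */
  }

  static int
  execute_sketch (std::vector<block> &lowering_order,
                  std::vector<block *> &rpo_order)
  {
    int flags = 0;
    bool any_resx_to_process = false;

    /* Phase 1: the existing lowering walk, now detection only.  */
    for (block &b : lowering_order)
      if (b.ends_in_internal_rethrow && !any_resx_to_process)
        sink_clobbers_sketch (b, &any_resx_to_process);

    /* (CFG manipulations and unreachable-block removal happen here.)  */

    /* Phase 2: the real sinking, in RPO, catching secondary opportunities.  */
    if (any_resx_to_process)
      for (block *b : rpo_order)
        if (b->ends_in_internal_rethrow)
          flags |= sink_clobbers_sketch (*b);

    return flags;
  }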