@@ -95,7 +95,8 @@ class best_match
: m_goal (goal_traits::get_string (goal)),
m_goal_len (goal_traits::get_length (goal)),
m_best_candidate (NULL),
- m_best_distance (best_distance_so_far)
+ m_best_distance (best_distance_so_far),
+ m_best_candidate_len (0)
{}
/* Compare the edit distance between CANDIDATE and m_goal,
@@ -64,7 +64,7 @@ public:
back_threader_profitability (bool speed_p)
: m_speed_p (speed_p)
{ }
- bool profitable_path_p (const vec<basic_block> &, tree name, edge taken,
+ bool profitable_path_p (const vec<basic_block> &, edge taken,
bool *irreducible_loop = NULL);
private:
const bool m_speed_p;
@@ -241,7 +241,7 @@ back_threader::maybe_register_path ()
else
{
bool irreducible = false;
- if (m_profit.profitable_path_p (m_path, m_name, taken_edge,
+ if (m_profit.profitable_path_p (m_path, taken_edge,
&irreducible)
&& debug_counter ()
&& m_registry.register_path (m_path, taken_edge))
@@ -348,7 +348,7 @@ back_threader::find_paths_to_names (basic_block bb, bitmap interesting)
// Try to resolve the path without looking back.
if (m_path.length () > 1
- && (!m_profit.profitable_path_p (m_path, m_name, NULL)
+ && (!m_profit.profitable_path_p (m_path, NULL)
|| maybe_register_path ()))
;
@@ -529,9 +529,6 @@ back_threader::debug ()
/* Examine jump threading path PATH and return TRUE if it is profitable to
thread it, otherwise return FALSE.
- NAME is the SSA_NAME of the variable we found to have a constant
- value on PATH. If unknown, SSA_NAME is NULL.
-
If the taken edge out of the path is known ahead of time it is passed in
TAKEN_EDGE, otherwise it is NULL.
@@ -543,7 +540,6 @@ back_threader::debug ()
bool
back_threader_profitability::profitable_path_p (const vec<basic_block> &m_path,
- tree name,
edge taken_edge,
bool *creates_irreducible_loop)
{
@@ -600,42 +596,27 @@ back_threader_profitability::profitable_path_p (const vec<basic_block> &m_path,
if (j < m_path.length () - 1)
{
int orig_n_insns = n_insns;
- /* PHIs in the path will create degenerate PHIS in the
- copied path which will then get propagated away, so
- looking at just the duplicate path the PHIs would
- seem unimportant.
-
- But those PHIs, because they're assignments to objects
- typically with lives that exist outside the thread path,
- will tend to generate PHIs (or at least new PHI arguments)
- at points where we leave the thread path and rejoin
- the original blocks. So we do want to account for them.
-
- We ignore virtual PHIs. We also ignore cases where BB
- has a single incoming edge. That's the most common
- degenerate PHI we'll see here. Finally we ignore PHIs
- that are associated with the value we're tracking as
- that object likely dies. */
- if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
+ /* If the path has exits other than the known taken_edge from
+ block j == 0 then those will create new edges into the exit
+ destination, increasing the number of PHI arguments there.
+	 Account for an extra copy in each such exit destination.
+ PHI nodes on the path itself all become degenerate and
+ propagate out on GIMPLE. */
+ if (j != 0 && EDGE_COUNT (bb->succs) > 1)
{
- for (gphi_iterator gsip = gsi_start_phis (bb);
- !gsi_end_p (gsip);
- gsi_next (&gsip))
- {
- gphi *phi = gsip.phi ();
- tree dst = gimple_phi_result (phi);
-
- /* Note that if both NAME and DST are anonymous
- SSA_NAMEs, then we do not have enough information
- to consider them associated. */
- if (dst != name
- && name
- && TREE_CODE (name) == SSA_NAME
- && (SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
- || !SSA_NAME_VAR (dst))
- && !virtual_operand_p (dst))
- ++n_insns;
- }
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->dest != m_path[j - 1])
+ for (gphi_iterator gsip = gsi_start_phis (e->dest);
+ !gsi_end_p (gsip);
+ gsi_next (&gsip))
+ {
+ gphi *phi = gsip.phi ();
+ tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
+ if (!virtual_operand_p (arg))
+ ++n_insns;
+ }
}
if (!contains_hot_bb && m_speed_p)
@@ -756,8 +756,9 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
attr = &thread_attr;
}
- start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
- * (nthreads - i));
+ if (i < nthreads)
+ start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
+ * (nthreads - i));
/* Launch new threads. */
for (; i < nthreads; ++i)