@@ -72,7 +72,7 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
limit_p = !targetm.array_mode_supported_p (mode, count);
if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s["
HOST_WIDE_INT_PRINT_DEC "]\n",
@@ -83,14 +83,14 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot use %s<%s><%s>\n", name,
GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
return false;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
GET_MODE_NAME (mode));
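(Every hunk in this patch applies the same mechanical substitution: the
open-coded "if (dump_enabled_p ())" guard becomes IF_VECT_DUMP.  The
macro itself is introduced elsewhere in the series and is not shown
here; presumably it is a trivial wrapper, so that the guarded
dump_printf_loc calls compile unchanged:

    /* Assumed definition -- not part of this excerpt.  */
    #define IF_VECT_DUMP if (dump_enabled_p ())

Under that assumption the substitution is behavior-preserving.)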
@@ -181,7 +181,7 @@ vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
if (checks[i] == value)
return;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "need run-time check that ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, value);
@@ -345,7 +345,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias not supported for: "
@@ -360,7 +360,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
return true;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
@@ -393,7 +393,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
|| STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias not supported for: "
@@ -408,7 +408,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
return true;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
@@ -433,13 +433,13 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
{
int dist = dist_v[loop_depth];
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.\n", dist);
if (dist == 0)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
@@ -470,7 +470,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (!vect_preserves_scalar_order_p (vect_dr_stmt (dra),
vect_dr_stmt (drb)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"READ_WRITE dependence in interleaving.\n");
return true;
@@ -483,7 +483,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
vect_check_nonzero_value (loop_vinfo, indicator);
else if (integer_zerop (indicator))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"access also has a zero step\n");
return true;
@@ -497,7 +497,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
/* If DDR_REVERSED_P the order of the data-refs in DDR was
reversed (to make distance vector positive), and the actual
distance is negative. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"dependence distance negative.\n");
/* Record a negative dependence distance to later limit the
@@ -516,7 +516,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
/* The dependence distance requires reduction of the maximal
vectorization factor. */
*max_vf = abs (dist);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"adjusting maximal vectorization factor to %i\n",
*max_vf);
@@ -526,13 +526,13 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
{
/* Dependence distance does not create dependence, as far as
vectorization is concerned, in this case. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance >= VF.\n");
continue;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized, possible dependence "
@@ -625,7 +625,7 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
/* Unknown data dependence. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine dependence between ");
@@ -635,7 +635,7 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
}
- else if (dump_enabled_p ())
+ else IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"determined dependence between ");
@@ -797,7 +797,7 @@ vect_record_base_alignment (vec_info *vinfo, gimple *stmt,
if (!existed || entry->base_alignment < drb->base_alignment)
{
entry = drb;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"recording new base alignment for ");
@@ -883,7 +883,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
tree ref = DR_REF (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_compute_data_ref_alignment:\n");
@@ -918,7 +918,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
step_preserves_misalignment_p
= (DR_STEP_ALIGNMENT (dr) % vector_alignment) == 0;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
if (step_preserves_misalignment_p)
dump_printf_loc (MSG_NOTE, vect_location,
@@ -940,9 +940,10 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
step_preserves_misalignment_p
= multiple_p (DR_STEP_ALIGNMENT (dr) * vf, vector_alignment);
- if (!step_preserves_misalignment_p && dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "step doesn't divide the vector alignment.\n");
+ if (!step_preserves_misalignment_p)
+ IF_VECT_DUMP
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "step doesn't divide the vector alignment.\n");
}
unsigned int base_alignment = drb->base_alignment;
@@ -963,7 +964,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
negative when computing the starting misalignment below. */
|| TREE_CODE (drb->step) != INTEGER_CST)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown alignment for access: ");
@@ -981,7 +982,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
|| !vect_can_force_dr_alignment_p (base,
vector_alignment * BITS_PER_UNIT))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"can't force alignment of ref: ");
@@ -994,7 +995,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
@@ -1020,7 +1021,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (!known_misalignment (misalignment, vector_alignment,
&const_misalignment))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Non-constant misalignment for access: ");
@@ -1032,7 +1033,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
SET_DR_MISALIGNMENT (dr, const_misalignment);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
@@ -1101,7 +1102,7 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
return;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
"to unknown (-1).\n");
SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN);
@@ -1119,7 +1120,7 @@ verify_data_ref_alignment (data_reference_p dr)
= vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
if (DR_IS_READ (dr))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -1136,9 +1137,10 @@ verify_data_ref_alignment (data_reference_p dr)
return false;
}
- if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "Vectorizing an unaligned access.\n");
+ if (supportable_dr_alignment != dr_aligned)
+ IF_VECT_DUMP
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Vectorizing an unaligned access.\n");
return true;
}
@@ -1232,7 +1234,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
{
HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
@@ -1241,7 +1243,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
}
if (DR_MISALIGNMENT (dr) % elmsize)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
return false;
@@ -1252,7 +1254,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
{
tree type = TREE_TYPE (DR_REF (dr));
bool is_packed = not_size_aligned (DR_REF (dr));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, %snaturally aligned\n",
is_packed ? "not " : "");
@@ -1288,7 +1290,7 @@ vect_get_data_access_cost (struct data_reference *dr,
else
vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_data_access_cost: inside_cost = %d, "
"outside_cost = %d.\n", *inside_cost, *outside_cost);
@@ -1845,7 +1847,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
if (!aligned_access_p (dr))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable\n");
break;
@@ -2041,7 +2043,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= DR_GROUP_SIZE (stmt_info);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Try peeling by %d\n", npeel);
}
@@ -2076,7 +2078,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (max_peel > max_allowed_peel)
{
do_peeling = false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Disable peeling, max peels reached: %d\n", max_peel);
}
@@ -2126,7 +2128,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
= DR_MISALIGNMENT (dr0);
SET_DR_MISALIGNMENT (dr0, 0);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using peeling.\n");
@@ -2251,12 +2253,12 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
SET_DR_MISALIGNMENT (dr, 0);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.\n");
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.\n");
@@ -2319,7 +2321,7 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr)
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment: ");
@@ -2366,7 +2368,7 @@ vect_analyze_data_refs_alignment (loop_vec_info vinfo)
&& !STMT_VINFO_GROUPED_ACCESS (stmt_info))
continue;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.\n");
@@ -2400,7 +2402,7 @@ vect_slp_analyze_and_verify_node_alignment (slp_tree node)
&& ! vect_compute_data_ref_alignment (first_dr))
|| ! verify_data_ref_alignment (dr))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad data alignment in basic "
"block.\n");
@@ -2470,7 +2472,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
simply not include that gap. */
if ((dr_step % type_size) != 0)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Step ");
@@ -2502,7 +2504,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
DR_GROUP_GAP (stmt_info) = groupsize - 1;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
@@ -2515,7 +2517,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
return true;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
@@ -2557,13 +2559,13 @@ vect_analyze_group_access_1 (struct data_reference *dr)
{
if (DR_IS_WRITE (data_ref))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.\n");
return false;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two or more load stmts share the same dr.\n");
@@ -2591,7 +2593,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
slp_impossible = true;
if (DR_IS_WRITE (data_ref))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
@@ -2619,7 +2621,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
inefficient way we have to cap earlier. See PR78699 for example. */
if (groupsize > 4096)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"group is too large\n");
return false;
@@ -2630,7 +2632,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
if (groupsize != count
&& !DR_IS_READ (dr))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
@@ -2643,7 +2645,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
DR_GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
@@ -2722,7 +2724,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop_vinfo && !step)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop\n");
return false;
@@ -2739,7 +2741,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
loop-carried dependencies between inner loop iterations. */
if (loop->safelen < 2)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in inner loop of nest\n");
return false;
@@ -2756,7 +2758,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
step = STMT_VINFO_DR_STEP (stmt_info);
if (integer_zerop (step))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in outer loop.\n");
return DR_IS_READ (dr);
@@ -2779,7 +2781,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop && nested_in_vect_loop_p (loop, stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"grouped access in outer loop.\n");
return false;
@@ -3043,7 +3045,7 @@ vect_analyze_data_ref_accesses (vec_info *vinfo)
break;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
@@ -3073,7 +3075,7 @@ vect_analyze_data_ref_accesses (vec_info *vinfo)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (vect_dr_stmt (dr)))
&& !vect_analyze_data_ref_access (dr))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.\n");
@@ -3226,7 +3228,7 @@ dependence_distance_ge_vf (data_dependence_relation *ddr,
return false;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance between ");
@@ -3268,7 +3270,7 @@ vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
{
lower_bounds[i].unsigned_p = unsigned_p;
lower_bounds[i].min_value = min_value;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"updating run-time check to ");
@@ -3280,7 +3282,7 @@ vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
}
vec_lower_bound lower_bound (expr, unsigned_p, min_value);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
dump_lower_bound (MSG_NOTE, lower_bound);
@@ -3413,7 +3415,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
if (!compared_objects.add (new_pair))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "checking that ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first);
@@ -3438,7 +3440,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
&& (vect_preserves_scalar_order_p (stmt_a, stmt_b)
|| vectorizable_with_step_bound_p (dr_a, dr_b, &lower_bound)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"no need for alias check between ");
@@ -3461,7 +3463,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
|| vect_small_gap_p (loop_vinfo, dr_b, lower_bound)))
{
bool unsigned_p = dr_known_forward_stride_p (dr_a);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "no alias between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
@@ -3537,25 +3539,26 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
segment_length_b,
access_size_a,
access_size_b);
- if (res >= 0 && dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "can tell at compile time that ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
- dump_printf (MSG_NOTE, " and ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b));
- if (res == 0)
- dump_printf (MSG_NOTE, " do not alias\n");
- else
- dump_printf (MSG_NOTE, " alias\n");
- }
+ if (res >= 0)
+ IF_VECT_DUMP
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "can tell at compile time that ");
+ dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
+ dump_printf (MSG_NOTE, " and ");
+ dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b));
+ if (res == 0)
+ dump_printf (MSG_NOTE, " do not alias\n");
+ else
+ dump_printf (MSG_NOTE, " alias\n");
+ }
if (res == 0)
continue;
if (res == 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: compilation time alias.\n");
return false;
@@ -3583,7 +3586,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
may_alias_ddrs.length (), count);
if ((int) count > PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"number of versioning for alias "
"run-time tests exceeds %d "
@@ -3956,7 +3959,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
if (gimple_has_volatile_ops (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: volatile type ");
@@ -3967,7 +3970,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
if (stmt_can_throw_internal (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement can throw an "
@@ -3986,7 +3989,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
if (refs.length () > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: more than one data ref "
@@ -4001,7 +4004,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
|| (gimple_call_internal_fn (call) != IFN_MASK_LOAD
&& gimple_call_internal_fn (call) != IFN_MASK_STORE))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: dr in a call ");
@@ -4014,7 +4017,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
&& DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement is bitfield "
@@ -4027,7 +4030,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
if (DR_BASE_ADDRESS (dr)
&& TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base addr of dr is a "
"constant\n");
@@ -4178,7 +4181,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
if (gatherscatter == SG_NONE && !simd_lane_access)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
@@ -4200,7 +4203,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
&& VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0))
&& DECL_NONALIASED (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base object not addressable "
@@ -4222,7 +4225,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
{
if (nested_in_vect_loop_p (loop, stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for strided "
@@ -4256,7 +4259,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
tree init_addr = fold_build_pointer_plus (base, init_offset);
tree init_ref = build_fold_indirect_ref (init_addr);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"analyze in outer loop: ");
@@ -4269,7 +4272,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
/* dr_analyze_innermost already explained the failure. */
return false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"\touter base_address: ");
@@ -4311,7 +4314,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
= get_vectype_for_scalar_type (scalar_type);
if (!STMT_VINFO_VECTYPE (stmt_info))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no vectype for stmt: ");
@@ -4340,7 +4343,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"got vectype for stmt: ");
@@ -4365,7 +4368,7 @@ vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
{
STMT_VINFO_DATA_REF (stmt_info) = NULL;
free_data_ref (dr);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
(gatherscatter == GATHER) ?
@@ -4591,7 +4594,7 @@ vect_create_addr_base_for_vector_ref (gimple *stmt,
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "created ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
@@ -4718,7 +4721,7 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
in LOOP. */
base_name = get_name (DR_BASE_ADDRESS (dr));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
dump_printf_loc (MSG_NOTE, vect_location,
@@ -5048,7 +5051,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
be a power of two. */
if (count != 3 && exact_log2 (count) == -1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2 or not eqaul to 3\n");
@@ -5067,7 +5070,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
unsigned int nelt;
if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot handle groups of 3 stores for"
" variable-length vectors\n");
@@ -5094,7 +5097,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (mode, indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_MISSED_OPTIMIZATION,
"permutation op not supported by target.\n");
return false;
@@ -5112,7 +5115,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (mode, indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_MISSED_OPTIMIZATION,
"permutation op not supported by target.\n");
return false;
@@ -5146,7 +5149,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_MISSED_OPTIMIZATION,
"permutaion op not supported by target.\n");
return false;
@@ -5659,7 +5662,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
see PR65518). */
if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"single-element interleaving not supported "
"for not adjacent vector loads\n");
@@ -5670,7 +5673,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
be a power of two. */
if (count != 3 && exact_log2 (count) == -1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2 or not equal to 3\n");
@@ -5686,7 +5689,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
unsigned int nelt;
if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot handle groups of 3 loads for"
" variable-length vectors\n");
@@ -5707,7 +5710,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (mode, indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 3 loads is not supported by"
" target\n");
@@ -5721,7 +5724,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (mode, indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 3 loads is not supported by"
" target\n");
@@ -5753,7 +5756,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"extract even/odd not supported by target\n");
return false;
@@ -6096,7 +6099,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
vec_perm_indices indices (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 2 fields structure is not \
supported by target\n");
@@ -6111,7 +6114,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 2 fields structure is not \
supported by target\n");
@@ -6126,7 +6129,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
@@ -6142,7 +6145,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"select is not supported by target\n");
return false;
@@ -6206,7 +6209,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
vec_perm_indices indices (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 3 fields structure is not \
supported by target\n");
@@ -6221,7 +6224,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
@@ -6235,7 +6238,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
@@ -6249,7 +6252,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
@@ -6263,7 +6266,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
@@ -938,7 +938,7 @@ vect_set_loop_condition (struct loop *loop, loop_vec_info loop_vinfo,
gsi_remove (&loop_cond_gsi, true);
free_stmt_vec_info (orig_cond);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "New loop exit condition: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0);
@@ -1301,7 +1301,7 @@ create_lcssa_for_virtual_phi (struct loop *loop)
location is calculated.
Return the loop location if succeed and NULL if not. */
-source_location
+optinfo_location
find_loop_location (struct loop *loop)
{
gimple *stmt = NULL;
@@ -1309,19 +1309,19 @@ find_loop_location (struct loop *loop)
gimple_stmt_iterator si;
if (!loop)
- return UNKNOWN_LOCATION;
+ return optinfo_location ();
stmt = get_loop_exit_condition (loop);
if (stmt
&& LOCATION_LOCUS (gimple_location (stmt)) > BUILTINS_LOCATION)
- return gimple_location (stmt);
+ return stmt;
/* If we got here the loop is probably not "well formed",
try to estimate the loop location */
if (!loop->header)
- return UNKNOWN_LOCATION;
+ return optinfo_location ();
bb = loop->header;
@@ -1329,10 +1329,10 @@ find_loop_location (struct loop *loop)
{
stmt = gsi_stmt (si);
if (LOCATION_LOCUS (gimple_location (stmt)) > BUILTINS_LOCATION)
- return gimple_location (stmt);
+ return stmt;
}
- return UNKNOWN_LOCATION;
+ return optinfo_location ();
}
/* Return true if PHI defines an IV of the loop to be vectorized. */
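(find_loop_location now returns an optinfo_location instead of a
source_location, and can hand back a gimple statement directly.  The
class is introduced elsewhere in the series; from its uses in this
patch -- default construction for an unknown location, implicit
construction from a gimple *, and the get_location_t () call in the
vect_loop_versioning hunk further down -- its interface is presumably
along these lines.  A minimal sketch under those assumptions, with the
member name m_loc invented for illustration:

    class optinfo_location
    {
    public:
      optinfo_location () : m_loc (UNKNOWN_LOCATION) {}
      /* Implicit, so "return stmt;" above compiles.  */
      optinfo_location (gimple *stmt) : m_loc (gimple_location (stmt)) {}

      location_t get_location_t () const { return m_loc; }

    private:
      location_t m_loc;
    };

Nothing in this excerpt requires more than that interface.)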
@@ -1370,14 +1370,14 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
/* Analyze phi functions of the loop header. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:\n");
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree evolution_part;
gphi *phi = gsi.phi ();
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -1389,7 +1389,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
Skip reduction phis. */
if (!iv_phi_p (phi))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"reduc or virtual phi. skip.\n");
continue;
@@ -1401,7 +1401,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
= STMT_VINFO_LOOP_PHI_EVOLUTION_PART (vinfo_for_stmt (phi));
if (evolution_part == NULL_TREE)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_MISSED_OPTIMIZATION,
"No access function or evolution.\n");
return false;
@@ -1412,7 +1412,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
if (!expr_invariant_in_loop_p (loop, evolution_part))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"evolution not invariant in loop.\n");
return false;
@@ -1423,7 +1423,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
if (tree_is_chrec (evolution_part))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"evolution is chrec.\n");
return false;
@@ -1500,7 +1500,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
gphi *phi = gsi.phi ();
gphi *phi1 = gsi1.phi ();
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_update_ivs_after_vectorizer: phi: ");
@@ -1510,7 +1510,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
/* Skip reduction and virtual phis. */
if (!iv_phi_p (phi))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"reduc or virtual phi. skip.\n");
continue;
@@ -1640,7 +1640,7 @@ vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
{
int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"known peeling = %d.\n", npeel);
@@ -1671,7 +1671,7 @@ vect_gen_prolog_loop_niters (loop_vec_info loop_vinfo,
*bound = align_in_elems - 1;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"niters for prolog loop: ");
@@ -1791,7 +1791,7 @@ vect_prepare_for_masked_peels (loop_vec_info loop_vinfo)
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"misalignment for fully-masked loop: ");
@@ -2494,7 +2494,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
}
}
- source_location loop_loc = find_loop_location (loop);
+ optinfo_location loop_loc = find_loop_location (loop);
struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
if (prolog_peeling)
{
@@ -2930,7 +2930,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, tree * cond_expr)
create_runtime_alias_checks (LOOP_VINFO_LOOP (loop_vinfo),
&comp_alias_ddrs, cond_expr);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"created %u versioning for alias checks.\n",
comp_alias_ddrs.length ());
@@ -3068,19 +3068,18 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
loop_constraint_set (loop, LOOP_C_INFINITE);
}
- if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
- && dump_enabled_p ())
- {
- if (version_alias)
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "loop versioned for vectorization because of "
- "possible aliasing\n");
- if (version_align)
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "loop versioned for vectorization to enhance "
- "alignment\n");
-
- }
+ if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION)
+ IF_VECT_DUMP
+ {
+ if (version_alias)
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "loop versioned for vectorization because of "
+ "possible aliasing\n");
+ if (version_align)
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "loop versioned for vectorization to enhance "
+ "alignment\n");
+ }
free_original_copy_tables ();
/* Loop versioning violates an assumption we try to maintain during
@@ -171,7 +171,7 @@ vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
&& !STMT_VINFO_LIVE_P (stmt_info))
|| gimple_clobber_p (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
return true;
}
@@ -213,7 +213,7 @@ static bool
vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
vec<stmt_vec_info > *mask_producers)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
@@ -232,7 +232,7 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
!gsi_end_p (si); gsi_next (&si))
{
stmt_vec_info def_stmt_info = vinfo_for_stmt (gsi_stmt (si));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def stmt: ");
@@ -244,7 +244,7 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
return false;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
@@ -307,7 +307,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
phi = si.phi ();
stmt_info = vinfo_for_stmt (phi);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -321,7 +321,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
scalar_type = TREE_TYPE (PHI_RESULT (phi));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
@@ -332,7 +332,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
@@ -345,14 +345,14 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
}
STMT_VINFO_VECTYPE (stmt_info) = vectype;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
@@ -374,7 +374,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
}
/* TODO: Analyze cost. Decide if worth while to vectorize. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
dump_dec (MSG_NOTE, vectorization_factor);
@@ -383,7 +383,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
if (known_le (vectorization_factor, 1U))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type\n");
return false;
@@ -430,7 +430,7 @@ vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
step_expr = evolution_part;
init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "step: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
@@ -452,7 +452,7 @@ vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
&& (TREE_CODE (step_expr) != REAL_CST
|| !flag_associative_math))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step unknown.\n");
return false;
@@ -489,7 +489,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -507,7 +507,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
if (access_fn)
{
STRIP_NOPS (access_fn);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
@@ -533,7 +533,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
!= NULL_TREE);
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
}
@@ -547,7 +547,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
gimple *reduc_stmt;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -562,7 +562,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
if (double_reduc)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Detected double reduction.\n");
@@ -574,7 +574,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
if (loop != LOOP_VINFO_LOOP (loop_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Detected vectorizable nested cycle.\n");
@@ -584,7 +584,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Detected reduction.\n");
@@ -600,7 +600,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
}
}
else
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown def-use cycle pattern.\n");
}
@@ -1186,7 +1186,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (loop->num_nodes != 2)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.\n");
return false;
@@ -1194,7 +1194,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (empty_block_p (loop->header))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: empty loop.\n");
return false;
@@ -1224,7 +1224,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if ((loop->inner)->inner || (loop->inner)->next)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple nested loops.\n");
return false;
@@ -1232,7 +1232,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (loop->num_nodes != 5)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.\n");
return false;
@@ -1243,7 +1243,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
|| !single_exit (innerloop)
|| single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported outerloop form.\n");
return false;
@@ -1258,7 +1258,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
loop. */
|| !integer_onep (inner_assumptions))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: Bad inner loop.\n");
return false;
@@ -1266,14 +1266,14 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (!expr_invariant_in_loop_p (loop, inner_niter))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: inner-loop count not"
" invariant.\n");
return false;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Considering outer-loop vectorization.\n");
}
@@ -1281,7 +1281,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (!single_exit (loop)
|| EDGE_COUNT (loop->header->preds) != 2)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
if (!single_exit (loop))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -1300,7 +1300,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (!empty_block_p (loop->latch)
|| !gimple_seq_empty_p (phi_nodes (loop->latch)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: latch block not empty.\n");
return false;
@@ -1310,7 +1310,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
edge e = single_exit (loop);
if (e->flags & EDGE_ABNORMAL)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: abnormal loop exit edge.\n");
return false;
@@ -1320,7 +1320,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
number_of_iterationsm1);
if (!*loop_cond)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated exit condition.\n");
return false;
@@ -1330,7 +1330,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
|| !*number_of_iterations
|| chrec_contains_undetermined (*number_of_iterations))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations cannot be "
"computed.\n");
@@ -1339,7 +1339,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
if (integer_zerop (*number_of_iterations))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations = 0.\n");
return false;
@@ -1381,8 +1381,8 @@ vect_analyze_loop_form (struct loop *loop)
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- if (dump_enabled_p ())
- {
+ IF_VECT_DUMP
+ {
dump_printf_loc (MSG_NOTE, vect_location,
"Symbolic number of iterations is ");
dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
@@ -1466,7 +1466,7 @@ vect_update_vf_for_slp (loop_vec_info loop_vinfo)
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Updating vectorization factor to ");
@@ -1533,7 +1533,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
ok = true;
stmt_info = vinfo_for_stmt (phi);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -1552,7 +1552,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
if (STMT_VINFO_LIVE_P (stmt_info)
&& !vect_active_double_reduction_p (stmt_info))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported loop-closed phi in "
"outer-loop.\n");
@@ -1596,7 +1596,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
&& STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
{
/* A scalar-dependence cycle that we don't support. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: scalar dependence cycle.\n");
return false;
@@ -1624,7 +1624,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant phi not "
@@ -1656,10 +1656,10 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
touching this loop. */
if (!need_to_vectorize)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"All the computation can be taken out of the loop.\n");
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: redundant loop. no profit to "
"vectorize.\n");
@@ -1693,7 +1693,7 @@ vect_analyze_loop_costing (loop_vec_info loop_vinfo)
if (max_niter != -1
&& (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count smaller than "
"vectorization factor.\n");
@@ -1707,10 +1707,10 @@ vect_analyze_loop_costing (loop_vec_info loop_vinfo)
if (min_profitable_iters < 0)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.\n");
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector version will never be "
"profitable.\n");
@@ -1730,10 +1730,10 @@ vect_analyze_loop_costing (loop_vec_info loop_vinfo)
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.\n");
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: iteration count smaller than user "
"specified loop bound parameter or minimum profitable "
@@ -1748,11 +1748,11 @@ vect_analyze_loop_costing (loop_vec_info loop_vinfo)
&& ((unsigned HOST_WIDE_INT) estimated_niter
< MAX (th, (unsigned) min_profitable_estimate)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: estimated iteration count too "
"small.\n");
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: estimated iteration count smaller "
"than specified loop bound parameter or minimum "
@@ -1838,7 +1838,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop nest containing two "
"or more consecutive inner loops cannot be "
@@ -1852,7 +1852,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
&LOOP_VINFO_DATAREFS (loop_vinfo),
&n_stmts))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function "
"calls or data references that cannot "
@@ -1866,7 +1866,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data references.\n");
return false;
@@ -1886,7 +1886,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
ok = vect_analyze_data_ref_accesses (loop_vinfo);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data access.\n");
return false;
@@ -1897,7 +1897,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected pattern.\n");
return false;
@@ -1916,7 +1916,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
|| (max_vf != MAX_VECTORIZATION_FACTOR
&& maybe_lt (max_vf, min_vf)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.\n");
return false;
@@ -1926,7 +1926,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
ok = vect_determine_vectorization_factor (loop_vinfo);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine vectorization factor.\n");
return false;
@@ -1934,7 +1934,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
if (max_vf != MAX_VECTORIZATION_FACTOR
&& maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.\n");
return false;
@@ -1975,14 +1975,15 @@ start_over:
poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
gcc_assert (known_ne (vectorization_factor, 0U));
- if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "vectorization_factor = ");
- dump_dec (MSG_NOTE, vectorization_factor);
- dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
- LOOP_VINFO_INT_NITERS (loop_vinfo));
- }
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ IF_VECT_DUMP
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "vectorization_factor = ");
+ dump_dec (MSG_NOTE, vectorization_factor);
+ dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
+ LOOP_VINFO_INT_NITERS (loop_vinfo));
+ }
HOST_WIDE_INT max_niter
= likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
@@ -1993,7 +1994,7 @@ start_over:
ok = vect_analyze_data_refs_alignment (loop_vinfo);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.\n");
return false;
@@ -2015,7 +2016,7 @@ start_over:
ok = vect_enhance_data_refs_alignment (loop_vinfo);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.\n");
return false;
@@ -2038,7 +2039,7 @@ start_over:
ok = vect_analyze_loop_operations (loop_vinfo);
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad operation or unsupported loop bound.\n");
return false;
@@ -2049,7 +2050,7 @@ start_over:
LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
= (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
&& vect_verify_full_masking (loop_vinfo));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
dump_printf_loc (MSG_NOTE, vect_location,
@@ -2071,7 +2072,7 @@ start_over:
if (known_lt (wi::to_widest (scalar_niters), vf))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"loop has no enough iterations to support"
" peeling for gaps.\n");
@@ -2085,7 +2086,7 @@ start_over:
goto again;
if (!res)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Loop costings not worthwhile.\n");
return false;
@@ -2123,14 +2124,14 @@ start_over:
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
|| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
if (!vect_can_advance_ivs_p (loop_vinfo)
|| !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
single_exit (LOOP_VINFO_LOOP
(loop_vinfo))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create required "
"epilog loop\n");
@@ -2218,7 +2219,7 @@ again:
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"re-trying with SLP disabled\n");
@@ -2302,7 +2303,7 @@ vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
&& loop_vec_info_for_loop (loop_outer (loop))
&& LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop already vectorized.\n");
return NULL;
@@ -2315,7 +2316,7 @@ vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
loop_vinfo = vect_analyze_loop_form (loop);
if (!loop_vinfo)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad loop form.\n");
return NULL;
@@ -2349,7 +2350,7 @@ vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
/* Try the next biggest vector size. */
current_vector_size = vector_sizes[next_size++];
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
@@ -2652,7 +2653,7 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
@@ -2908,7 +2909,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"intermediate value used outside loop.\n");
@@ -2918,7 +2919,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
nloop_uses++;
if (nloop_uses > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction value used in loop.\n");
return NULL;
@@ -2931,7 +2932,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
if (TREE_CODE (loop_arg) != SSA_NAME)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: not ssa_name: ");
@@ -2954,7 +2955,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: unhandled reduction operation: ");
@@ -2980,7 +2981,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
lcphis.safe_push (as_a <gphi *> (use_stmt));
if (nloop_uses > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction used in loop.\n");
return NULL;
@@ -2996,7 +2997,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
if (gimple_phi_num_args (def_stmt) != 1
|| TREE_CODE (op1) != SSA_NAME)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported phi node definition.\n");
@@ -3011,7 +3012,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
&& is_gimple_assign (def1)
&& flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt,
"detected double reduction: ");
@@ -3065,7 +3066,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
}
if (op3 == phi_name || op4 == phi_name)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: condition depends on previous"
" iteration: ");
@@ -3077,7 +3078,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
}
else if (!commutative_tree_code (code) || !associative_tree_code (code))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: not commutative/associative: ");
return NULL;
@@ -3089,7 +3090,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: not handled operation: ");
return NULL;
@@ -3097,7 +3098,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: both uses not ssa_names: ");
@@ -3114,7 +3115,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
|| (op4 && TREE_CODE (op4) == SSA_NAME
&& !types_compatible_p (type, TREE_TYPE (op4))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"reduction: multiple types: operation type: ");
@@ -3170,7 +3171,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
if (code != COND_EXPR
&& ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
return NULL;
}
@@ -3193,7 +3194,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def1)))))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
return def_stmt;
}
@@ -3238,7 +3239,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt,
"detected reduction: cannot swap operands "
"for cond_expr");
@@ -3249,7 +3250,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
gimple_assign_rhs2_ptr (def_stmt));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt,
"detected reduction: need to swap operands: ");
@@ -3258,7 +3259,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
}
@@ -3271,7 +3272,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
&& orig_code != MINUS_EXPR
&& vect_is_slp_reduction (loop_info, phi, def_stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
report_vect_op (MSG_NOTE, def_stmt,
"reduction: detected reduction chain: ");
@@ -3293,7 +3294,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
code))
return def_stmt;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: unknown pattern: ");
@@ -3341,7 +3342,7 @@ vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
*peel_iters_epilogue = assumed_vf / 2;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"cost model: epilogue peel iters set to vf/2 "
"because loop iterations are unknown .\n");
@@ -3687,7 +3688,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
@@ -3742,10 +3743,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
else
{
if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
- warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
- "did not happen for a simd loop");
+ warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
+ "vectorization did not happen for a simd loop");
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cost model: the vector iteration cost = %d "
"divided by the scalar iteration cost = %d "
@@ -3757,16 +3758,17 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
return;
}
- dump_printf (MSG_NOTE,
- " Calculated minimum iters for profitability: %d\n",
- min_profitable_iters);
+ IF_VECT_DUMP
+ dump_printf (MSG_NOTE,
+ " Calculated minimum iters for profitability: %d\n",
+ min_profitable_iters);
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
&& min_profitable_iters < (assumed_vf + peel_iters_prologue))
/* We want the vectorized loop to execute at least once. */
min_profitable_iters = assumed_vf + peel_iters_prologue;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
" Runtime profitability threshold = %d\n",
min_profitable_iters);
@@ -3792,7 +3794,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
- vec_inside_cost);
}
min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
" Static estimate profitability threshold = %d\n",
min_profitable_estimate);
@@ -4008,7 +4010,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_NOTE,
"vect_model_reduction_cost: inside_cost = %d, "
"prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
@@ -4037,7 +4039,7 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
stmt_info, 0, vect_prologue);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_induction_cost: inside_cost = %d, "
"prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -4625,7 +4627,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
UNKNOWN_LOCATION);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform reduction: created def-use cycle: ");
@@ -5114,7 +5116,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
/* Case 1: Create:
v_out2 = reduc_expr <v_out1> */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using direct vector reduction.\n");
@@ -5383,7 +5385,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
tree rhs;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using vector shifts\n");
@@ -5412,7 +5414,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
/* 2.4 Extract the final scalar result. Create:
s_out3 = extract_field <v_out2, bitpos> */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"extract scalar result\n");
@@ -5436,7 +5438,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
Create: s = op <s, s'> // For non SLP cases
} */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using scalar code.\n");
@@ -5759,7 +5761,7 @@ vect_finalize_reduction:
UNKNOWN_LOCATION);
add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"created double reduction phi node: ");
@@ -6443,7 +6445,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order reduction chain without SLP.\n");
return false;
@@ -6497,7 +6499,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
as a reduction operation. */
if (reduc_index == -1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conditional reduction chains not supported\n");
return false;
@@ -6522,7 +6524,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
vectype_in, OPTIMIZE_FOR_SPEED))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"optimizing condition reduction with"
" FOLD_EXTRACT_LAST.\n");
@@ -6563,7 +6565,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
}
if (cond_reduc_val)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"condition expression based on "
"integer induction.\n");
@@ -6589,7 +6591,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
cond_initial_val, cond_reduc_val);
if (e && (integer_onep (e) || integer_zerop (e)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"condition expression based on "
"compile time constant.\n");
@@ -6634,7 +6636,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
ops[reduc_index], 0, NULL,
cost_vec))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported condition in reduction\n");
return false;
@@ -6649,7 +6651,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
/* Shifts and rotates are only supported by vectorizable_shifts,
not vectorizable_reduction. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported shift or rotation.\n");
return false;
@@ -6659,7 +6661,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
optab = optab_for_tree_code (code, vectype_in, optab_default);
if (!optab)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.\n");
@@ -6668,14 +6670,14 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_NOTE, "op not supported by target.\n");
if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| !vect_worthwhile_without_simd_p (loop_vinfo, code))
return false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_NOTE, "proceeding using word mode.\n");
}
@@ -6683,7 +6685,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
&& !vect_worthwhile_without_simd_p (loop_vinfo, code))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.\n");
@@ -6789,7 +6791,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
&& !direct_internal_fn_supported_p (reduc_fn, vectype_out,
OPTIMIZE_FOR_SPEED))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc op not supported by target.\n");
@@ -6800,7 +6802,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (!nested_cycle || double_reduc)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no reduc code for scalar code.\n");
@@ -6825,7 +6827,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
&& reduc_fn == IFN_LAST
&& !nunits_out.is_constant ())
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"missing target support for reduction on"
" variable-length vectors.\n");
@@ -6835,7 +6837,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
&& ncopies > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in double reduction or condition "
"reduction.\n");
@@ -6865,7 +6867,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
l += a[j];
which is a reassociation of the original operation. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order double reduction not supported.\n");
@@ -6878,7 +6880,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
{
/* We cannot use in-order reductions in this case because there is
an implicit reassociation of the operations involved. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order unchained SLP reductions not supported.\n");
return false;
@@ -6893,7 +6895,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
&& !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
vectype_out, OPTIMIZE_FOR_SPEED))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction on variable-length vectors requires"
" target support for a vector-shift-and-insert"
@@ -6916,7 +6918,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if (!neutral_op
&& !can_duplicate_and_interleave_p (group_size, elt_mode))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported form of SLP reduction for"
" variable-length vectors: cannot build"
@@ -6928,7 +6930,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
up the the initial vector does too. */
if (!multiple_p (nunits_out, group_size))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported form of SLP reduction for"
" variable-length vectors: the vector size"
@@ -6949,7 +6951,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"invalid types in dot-prod\n");
@@ -6963,7 +6965,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
if (! max_loop_iterations (loop, &ni))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"loop count not known, cannot create cond "
"reduction.\n");
@@ -6978,7 +6980,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
if (wi::geu_p (ni, wi::to_widest (max_index)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"loop size is greater than data size.\n");
return false;
@@ -7036,7 +7038,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
|| code == WIDEN_SUM_EXPR
|| code == SAD_EXPR))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multi def-use cycle not possible for lane-reducing "
"reduction operation\n");
@@ -7062,7 +7064,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
|| !direct_internal_fn_supported_p (cond_fn, vectype_in,
OPTIMIZE_FOR_SPEED)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because no"
" conditional operation is available.\n");
@@ -7070,7 +7072,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
}
else if (reduc_index == -1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop for chained"
" reductions.\n");
@@ -7080,17 +7082,17 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
vectype_in);
}
- if (dump_enabled_p ()
- && reduction_type == FOLD_LEFT_REDUCTION)
- dump_printf_loc (MSG_NOTE, vect_location,
- "using an in-order (fold-left) reduction.\n");
+ if (reduction_type == FOLD_LEFT_REDUCTION)
+ IF_VECT_DUMP
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "using an in-order (fold-left) reduction.\n");
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
return true;
}
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
/* FORNOW: Multiple types are not supported for condition. */
@@ -7387,7 +7389,7 @@ vectorizable_induction (gimple *phi,
if (ncopies > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.\n");
return false;
@@ -7418,7 +7420,7 @@ vectorizable_induction (gimple *phi,
if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner-loop induction only used outside "
"of the outer vectorized loop.\n");
@@ -7436,7 +7438,7 @@ vectorizable_induction (gimple *phi,
if (slp_node && !nunits.is_constant ())
{
/* The current SLP code creates the initial value element-by-element. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP induction not supported for variable-length"
" vectors.\n");
@@ -7458,7 +7460,7 @@ vectorizable_induction (gimple *phi,
evolution S, for a vector of 4 units, we want to compute:
[X, X + S, X + 2*S, X + 3*S]. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
latch_e = loop_latch_edge (iv_loop);
@@ -7850,7 +7852,7 @@ vectorizable_induction (gimple *phi,
&& !STMT_VINFO_LIVE_P (stmt_vinfo));
STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vector of inductions after inner-loop:");
@@ -7860,7 +7862,7 @@ vectorizable_induction (gimple *phi,
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform induction: created def-use cycle: ");
@@ -7912,7 +7914,7 @@ vectorizable_live_operation (gimple *stmt,
if (!STMT_VINFO_RELEVANT_P (stmt_info))
{
gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"statement is simple and uses invariant. Leaving in "
"place.\n");
@@ -7940,7 +7942,7 @@ vectorizable_live_operation (gimple *stmt,
that vector we need. */
if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Cannot determine which vector holds the"
" final result.\n");
@@ -7956,7 +7958,7 @@ vectorizable_live_operation (gimple *stmt,
if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
OPTIMIZE_FOR_SPEED))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because "
"the target doesn't support extract last "
@@ -7965,7 +7967,7 @@ vectorizable_live_operation (gimple *stmt,
}
else if (slp_node)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because an "
"SLP statement is live after the loop.\n");
@@ -7973,7 +7975,7 @@ vectorizable_live_operation (gimple *stmt,
}
else if (ncopies > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because"
" ncopies is greater than 1.\n");
@@ -8118,7 +8120,7 @@ vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
if (gimple_debug_bind_p (ustmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"killing debug use\n");
@@ -8330,7 +8332,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (th >= vect_vf_for_cost (loop_vinfo)
&& !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Profitability threshold is %d loop iterations.\n",
th);
@@ -8343,7 +8345,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_NOTE, "split exit edge\n");
}
@@ -8377,7 +8379,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
}
}
@@ -8432,7 +8434,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
gsi_next (&si))
{
gphi *phi = si.phi ();
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing phi: ");
@@ -8451,16 +8453,16 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (STMT_VINFO_VECTYPE (stmt_info)
&& (maybe_ne
- (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
- && dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
+ (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf)))
+ IF_VECT_DUMP
+ dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
&& ! PURE_SLP_STMT (stmt_info))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
}
@@ -8487,7 +8489,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing statement: ");
@@ -8559,7 +8561,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (!gsi_end_p (pattern_def_si))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> vectorizing pattern def "
@@ -8586,11 +8588,11 @@ vect_transform_loop (loop_vec_info loop_vinfo)
poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (!STMT_SLP_TYPE (stmt_info)
- && maybe_ne (nunits, vf)
- && dump_enabled_p ())
+ && maybe_ne (nunits, vf))
+ IF_VECT_DUMP
/* For SLP VF is set according to unrolling factor, and not
to vector size, hence for SLP this print is not valid. */
- dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
+ dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
}
/* SLP. Schedule all the SLP instances when the first SLP stmt is
@@ -8619,7 +8621,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
}
/* -------- vectorize statement ------------ */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
grouped_store = false;
@@ -8731,7 +8733,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
: wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
assumed_vf) - 1);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
{
@@ -8901,7 +8903,7 @@ optimize_mask_stores (struct loop *loop)
make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
if (dom_info_available_p (CDI_DOMINATORS))
set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Create new block %d to sink mask stores.",
store_bb->index);
@@ -8940,7 +8942,7 @@ optimize_mask_stores (struct loop *loop)
gsi_move_before (&gsi_from, &gsi_to);
/* Setup GSI_TO to the non-empty block start. */
gsi_to = gsi_start_bb (store_bb);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Move stmt to created bb\n");
@@ -9008,7 +9010,7 @@ optimize_mask_stores (struct loop *loop)
break;
/* Can move STMT1 to STORE_BB. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Move stmt to created bb\n");
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -449,7 +449,7 @@ vect_recog_dot_prod_pattern (vec<gimple *> *stmts, tree *type_in,
pattern_stmt = gimple_build_assign (var, DOT_PROD_EXPR,
oprnd00, oprnd01, oprnd1);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_dot_prod_pattern: detected: ");
@@ -683,7 +683,7 @@ vect_recog_sad_pattern (vec<gimple *> *stmts, tree *type_in,
gimple *pattern_stmt = gimple_build_assign (var, SAD_EXPR, sad_oprnd0,
sad_oprnd1, plus_oprnd1);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_sad_pattern: detected: ");
@@ -965,7 +965,7 @@ vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
TYPE_UNSIGNED (type));
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_widen_mult_pattern: detected:\n");
@@ -1016,7 +1016,7 @@ vect_recog_widen_mult_pattern (vec<gimple *> *stmts,
gimple_assign_lhs (pattern_stmt));
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
stmts->safe_push (last_stmt);
@@ -1286,7 +1286,7 @@ vect_recog_widen_sum_pattern (vec<gimple *> *stmts, tree *type_in,
var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt = gimple_build_assign (var, WIDEN_SUM_EXPR, oprnd0, oprnd1);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_widen_sum_pattern: detected: ");
@@ -1585,7 +1585,7 @@ vect_recog_over_widening_pattern (vec<gimple *> *stmts,
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
new_pattern_def_seq (vinfo_for_stmt (stmt), new_def_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"created pattern stmt: ");
@@ -1652,7 +1652,7 @@ vect_recog_over_widening_pattern (vec<gimple *> *stmts,
return NULL;
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_over_widening_pattern: detected: ");
@@ -1789,7 +1789,7 @@ vect_recog_widen_shift_pattern (vec<gimple *> *stmts,
return NULL;
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_widen_shift_pattern: detected:\n");
@@ -1822,7 +1822,7 @@ vect_recog_widen_shift_pattern (vec<gimple *> *stmts,
STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
stmts->safe_push (last_stmt);
@@ -2059,7 +2059,7 @@ vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_in, tree *type_out)
append_pattern_def_seq (stmt_vinfo, def_stmt);
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_rotate_pattern: detected:\n");
@@ -2067,7 +2067,7 @@ vect_recog_rotate_pattern (vec<gimple *> *stmts, tree *type_in, tree *type_out)
var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt = gimple_build_assign (var, BIT_IOR_EXPR, var1, var2);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
stmts->safe_push (last_stmt);
@@ -2203,7 +2203,7 @@ vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
}
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_vector_vector_shift_pattern: detected:\n");
@@ -2211,7 +2211,7 @@ vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
pattern_stmt = gimple_build_assign (var, rhs_code, oprnd0, def);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
stmts->safe_push (last_stmt);
@@ -2580,11 +2580,11 @@ vect_recog_mult_pattern (vec<gimple *> *stmts,
return NULL;
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_mult_pattern: detected:\n");
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM,
pattern_stmt,0);
@@ -2702,7 +2702,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
return NULL;
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_divmod_pattern: detected:\n");
@@ -2788,7 +2788,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
signmask);
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt,
0);
@@ -3042,7 +3042,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
}
/* Pattern detected. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_divmod_pattern: detected: ");
@@ -3207,7 +3207,7 @@ vect_recog_mixed_size_cond_pattern (vec<gimple *> *stmts, tree *type_in,
*type_in = vecitype;
*type_out = vectype;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_mixed_size_cond_pattern: detected:\n");
@@ -3788,7 +3788,7 @@ vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
*type_out = vectype;
*type_in = vectype;
stmts->safe_push (last_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_bool_pattern: detected:\n");
@@ -3829,7 +3829,7 @@ vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
*type_out = vectype;
*type_in = vectype;
stmts->safe_push (last_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_bool_pattern: detected:\n");
@@ -3888,7 +3888,7 @@ vect_recog_bool_pattern (vec<gimple *> *stmts, tree *type_in,
*type_out = vectype;
*type_in = vectype;
stmts->safe_push (last_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_bool_pattern: detected:\n");
return pattern_stmt;
@@ -4025,7 +4025,7 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
*type_out = vectype1;
*type_in = vectype1;
stmts->safe_push (last_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_mask_conversion_pattern: detected:\n");
@@ -4151,7 +4151,7 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
*type_out = vectype1;
*type_in = vectype1;
stmts->safe_push (last_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_mask_conversion_pattern: detected:\n");
@@ -4199,7 +4199,7 @@ vect_recog_mask_conversion_pattern (vec<gimple *> *stmts, tree *type_in,
*type_out = vectype1;
*type_in = vectype1;
stmts->safe_push (last_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_mask_conversion_pattern: detected:\n");
@@ -4384,7 +4384,7 @@ vect_try_gather_scatter_pattern (gimple *stmt, stmt_vec_info last_stmt_info,
*type_out = vectype;
*type_in = vectype;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"gather/scatter pattern detected:\n");
@@ -4538,7 +4538,7 @@ vect_pattern_recog_1 (vect_recog_func *recog_func,
}
/* Found a vectorizable pattern. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"%s pattern recognized: ", recog_func->name);
@@ -4567,7 +4567,7 @@ vect_pattern_recog_1 (vect_recog_func *recog_func,
{
stmt_info = vinfo_for_stmt (stmt);
pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"additional pattern stmt: ");
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -350,7 +350,7 @@ again:
if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: can't analyze def for ");
@@ -386,7 +386,7 @@ again:
goto again;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: some of the stmts"
@@ -403,7 +403,7 @@ again:
if (dt == vect_unknown_def_type)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.\n");
return -1;
@@ -416,7 +416,7 @@ again:
break;
default:
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt:\n");
return -1;
@@ -458,7 +458,7 @@ again:
goto again;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different types\n");
@@ -471,7 +471,7 @@ again:
|| !can_duplicate_and_interleave_p (stmts.length (),
TYPE_MODE (type))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: invalid type of def "
@@ -498,7 +498,7 @@ again:
default:
/* FORNOW: Not supported. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: illegal type of def ");
@@ -517,7 +517,7 @@ again:
we've committed to the operand order and can't swap it. */
if (STMT_VINFO_NUM_SLP_USES (vinfo_for_stmt (stmt)) != 0)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: cannot swap operands of "
@@ -553,7 +553,7 @@ again:
else
swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
gimple_assign_rhs2_ptr (stmt));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"swapped operands to match def types in ");
@@ -578,7 +578,7 @@ vect_record_max_nunits (vec_info *vinfo, gimple *stmt, unsigned int group_size,
{
if (!vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type in ");
@@ -678,7 +678,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
swap[i] = 0;
matches[i] = false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -687,7 +687,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
/* Fail to vectorize statements marked as unvectorizable. */
if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unvectorizable statement ");
@@ -701,7 +701,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
lhs = gimple_get_lhs (stmt);
if (lhs == NULL_TREE)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not GIMPLE_ASSIGN nor "
@@ -736,7 +736,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
|| !gimple_call_nothrow_p (call_stmt)
|| gimple_call_chain (call_stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported call type ");
@@ -764,7 +764,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
{
if (vectype == boolean_type_node)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: shift of a"
" boolean.\n");
@@ -788,7 +788,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (!optab)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: no optab.\n");
/* Fatal mismatch. */
@@ -798,7 +798,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: "
"op not supported by target.\n");
@@ -844,7 +844,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
|| first_stmt_code == COMPONENT_REF
|| first_stmt_code == MEM_REF)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different operation "
@@ -862,7 +862,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (need_same_oprnds
&& !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different shift "
@@ -882,7 +882,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
|| gimple_call_fntype (first_stmt)
!= gimple_call_fntype (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different calls in ");
@@ -913,7 +913,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
chains in the same node. */
if (prev_first_load != first_load)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@@ -935,7 +935,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
{
/* Not grouped load. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not grouped load ");
@@ -955,7 +955,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
&& TREE_CODE_CLASS (rhs_code) != tcc_comparison
&& rhs_code != CALL_EXPR)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: operation");
@@ -993,7 +993,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
swap[i] = 2;
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different"
@@ -1027,7 +1027,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
{
matches[i] = false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different operation "
@@ -1374,7 +1374,7 @@ vect_build_slp_tree_2 (vec_info *vinfo,
{
if (!swap_not_matching)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@@ -1510,7 +1510,7 @@ fail:
/* Dump a slp tree NODE using flags specified in DUMP_KIND. */
static void
-vect_print_slp_tree (dump_flags_t dump_kind, location_t loc, slp_tree node)
+vect_print_slp_tree (dump_flags_t dump_kind, optinfo_location loc, slp_tree node)
{
int i;
gimple *stmt;
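
Note that the new optinfo_location parameter is taken by value; assuming the implicit conversions sketched earlier, no caller of vect_print_slp_tree needs to change, whether it passes the global vect_location or a bare location_t. Illustrative calls, not part of the patch:

    /* Both location arguments convert implicitly.  */
    vect_print_slp_tree (MSG_NOTE, vect_location, node);
    vect_print_slp_tree (MSG_NOTE, UNKNOWN_LOCATION, node);
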
@@ -1686,7 +1686,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
slp_tree node;
gimple *stmt, *load, *next_load;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
@@ -1865,7 +1865,7 @@ vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
/* DR_GROUP_GAP of the first group now has to skip over the second group too. */
DR_GROUP_GAP (first_vinfo) += group2_size;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
group1_size, group2_size);
@@ -1920,7 +1920,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
if (!vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type ");
@@ -2000,7 +2000,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
if (!max_nunits.is_constant (&const_max_nunits)
|| const_max_nunits > group_size)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: store group "
"size not a multiple of the vector size "
@@ -2063,7 +2063,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
{
if (!vect_supported_load_permutation_p (new_instance))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported load "
@@ -2098,7 +2098,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
}
if (i == loads.length ())
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Built SLP cancelled: can use "
"load/store-lanes\n");
@@ -2109,7 +2109,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
vinfo->slp_instances.safe_push (new_instance);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"Final SLP tree for instance:\n");
@@ -2250,14 +2250,15 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
- if (decided_to_slp && dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location,
- "Decided to SLP %d instances. Unrolling factor ",
- decided_to_slp);
- dump_dec (MSG_NOTE, unrolling_factor);
- dump_printf (MSG_NOTE, "\n");
- }
+ if (decided_to_slp)
+ IF_VECT_DUMP
+ {
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "Decided to SLP %d instances. Unrolling factor ",
+ decided_to_slp);
+ dump_dec (MSG_NOTE, unrolling_factor);
+ dump_printf (MSG_NOTE, "\n");
+ }
return (decided_to_slp > 0);
}
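
Hunks of this kind hoist the non-dump half of a combined test ("decided_to_slp && dump_enabled_p ()") out of the condition so that IF_VECT_DUMP can guard the whole braced block. Assuming the macro definition sketched earlier, the rewritten form preprocesses back to the original control flow; one representative dump call stands in for the three in the real block:

    /* Post-preprocessing shape of the hunk above (sketch).  */
    if (decided_to_slp)
      if (!dump_enabled_p ())
        ;
      else
        {
          dump_printf (MSG_NOTE, "\n");
        }
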
@@ -2312,7 +2313,7 @@ vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
&& !(gimple_code (use_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
"def in non-SLP stmt: ");
@@ -2326,7 +2327,7 @@ vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
if (stype == hybrid
&& !HYBRID_SLP_STMT (stmt_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -2357,7 +2358,7 @@ vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
&& PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
@@ -2774,7 +2775,7 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
@@ -2815,7 +2816,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many instructions in "
"basic block.\n");
@@ -2833,7 +2834,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
if (!vect_analyze_data_refs (bb_vinfo, &min_vf))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref in basic "
"block.\n");
@@ -2844,7 +2845,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not enough data-refs in "
"basic block.\n");
@@ -2855,7 +2856,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
if (!vect_analyze_data_ref_accesses (bb_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data access in "
"basic block.\n");
@@ -2869,7 +2870,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
anyway. */
if (bb_vinfo->grouped_stores.is_empty ())
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no grouped stores in "
"basic block.\n");
@@ -2887,7 +2888,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
trees. */
if (!vect_analyze_slp (bb_vinfo, n_stmts))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Failed to SLP the basic block.\n");
@@ -2934,7 +2935,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
if (!vect_slp_analyze_operations (bb_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad operation in basic block.\n");
@@ -2946,7 +2947,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
if (!unlimited_cost_model (NULL)
&& !vect_bb_vectorization_profitable_p (bb_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization is not "
"profitable.\n");
@@ -2955,7 +2956,7 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
return NULL;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Basic block will be vectorized using SLP\n");
@@ -3001,7 +3002,7 @@ vect_slp_bb (basic_block bb)
insns++;
if (gimple_location (stmt) != UNKNOWN_LOCATION)
- vect_location = gimple_location (stmt);
+ vect_location = stmt;
if (!vect_find_stmt_data_reference (NULL, stmt, &datarefs))
break;
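
The vect_location assignment above is the one place in these hunks where the variable is set from a statement rather than a location, which is what motivates the gimple * converting constructor assumed in the earlier optinfo_location sketch. Old versus new, side by side (illustrative):

    /* Old: keep only the raw source location of the statement.  */
    vect_location = gimple_location (stmt);

    /* New: hand over the statement itself.  The wrapper can still
       produce the raw location via get_location_t (), and keeping
       the statement leaves room for richer optimization records.  */
    vect_location = stmt;
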
@@ -3023,12 +3024,12 @@ vect_slp_bb (basic_block bb)
if (bb_vinfo
&& dbg_cnt (vect_slp))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB part\n");
vect_schedule_slp (bb_vinfo);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"basic block part vectorized\n");
@@ -3066,7 +3067,7 @@ vect_slp_bb (basic_block bb)
{
/* Try the next biggest vector size. */
current_vector_size = vector_sizes[next_size++];
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
@@ -3730,7 +3731,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"permutation requires at "
@@ -3752,7 +3753,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
indices.new_vector (mask, 2, nunits);
if (!can_vec_perm_const_p (mode, indices))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@@ -3867,7 +3868,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
if (!SLP_TREE_VEC_STMTS (node).exists ())
SLP_TREE_VEC_STMTS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE,vect_location,
"------>vectorizing SLP node starting from: ");
@@ -4030,7 +4031,7 @@ vect_schedule_slp (vec_info *vinfo)
/* Schedule the tree of INSTANCE. */
is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
instance, bst_map);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vectorizing stmts using SLP.\n");
}
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -204,7 +204,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
gimple *pattern_stmt;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"mark relevant %d, live %d: ", relevant, live_p);
@@ -224,7 +224,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"last stmt in pattern. don't mark"
" relevant/live.\n");
@@ -242,7 +242,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
&& STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"already marked relevant/live.\n");
return;
@@ -272,7 +272,7 @@ is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -320,7 +320,7 @@ vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
if (gimple_vdef (stmt)
&& !gimple_clobber_p (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: stmt has vdefs.\n");
*relevant = vect_used_in_scope;
@@ -334,7 +334,7 @@ vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
basic_block bb = gimple_bb (USE_STMT (use_p));
if (!flow_bb_inside_loop_p (loop, bb))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: used out of loop.\n");
@@ -354,7 +354,7 @@ vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
if (*live_p && *relevant == vect_unused_in_scope
&& !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: stmt live but not relevant.\n");
*relevant = vect_used_only_live;
@@ -474,7 +474,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported use in stmt.\n");
return false;
@@ -486,7 +486,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
def_bb = gimple_bb (def_stmt);
if (!flow_bb_inside_loop_p (loop, def_bb))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
return true;
}
@@ -504,7 +504,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
&& STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
&& bb->loop_father == def_bb->loop_father)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"reduc-stmt defining reduc-phi in the same nest.\n");
if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
@@ -524,7 +524,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
... */
if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop def-stmt defining inner-loop stmt.\n");
@@ -562,7 +562,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
stmt # use (d) */
else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"inner-loop def-stmt defining outer-loop stmt.\n");
@@ -597,7 +597,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
&& (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
== use))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"induction value on backedge.\n");
return true;
@@ -651,7 +651,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
phi = gsi_stmt (si);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@@ -663,7 +663,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
stmt = gsi_stmt (si);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -681,7 +681,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
ssa_op_iter iter;
stmt = worklist.pop ();
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -713,7 +713,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
&& relevant != vect_used_by_reduction
&& relevant != vect_used_only_live)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of reduction.\n");
return false;
@@ -725,7 +725,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
&& relevant != vect_used_in_outer_by_reduction
&& relevant != vect_used_in_outer)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of nested cycle.\n");
@@ -738,7 +738,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
&& relevant != vect_used_by_reduction
&& relevant != vect_used_only_live)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of double reduction.\n");
@@ -939,7 +939,7 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_simple_cost: inside_cost = %d, "
"prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -974,7 +974,7 @@ vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
stmt_info, 0, vect_prologue);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_promotion_demotion_cost: inside_cost = %d, "
"prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -1034,7 +1034,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: strided group_size = %d .\n",
group_size);
@@ -1064,7 +1064,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
vec_to_scalar, stmt_info, 0, vect_body);
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: inside_cost = %d, "
"prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -1088,7 +1088,7 @@ vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
vector_store, stmt_info, 0,
vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: aligned.\n");
break;
@@ -1100,7 +1100,7 @@ vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
unaligned_store, stmt_info,
DR_MISALIGNMENT (dr), vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: unaligned supported by "
"hardware.\n");
@@ -1111,7 +1111,7 @@ vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
{
*inside_cost = VECT_MAX_COST;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_store_cost: unsupported access.\n");
break;
@@ -1214,7 +1214,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: strided group_size = %d .\n",
group_size);
@@ -1240,7 +1240,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: inside_cost = %d, "
"prologue_cost = %d .\n", inside_cost, prologue_cost);
@@ -1266,7 +1266,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: aligned.\n");
@@ -1279,7 +1279,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
unaligned_load, stmt_info,
DR_MISALIGNMENT (dr), vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned supported by "
"hardware.\n");
@@ -1300,7 +1300,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign\n");
@@ -1308,7 +1308,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
}
case dr_explicit_realign_optimized:
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned software "
"pipelined.\n");
@@ -1336,7 +1336,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
stmt_info, 0, vect_body);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign optimized"
"\n");
@@ -1348,7 +1348,7 @@ vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
{
*inside_cost = VECT_MAX_COST;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_load_cost: unsupported access.\n");
break;
@@ -1398,7 +1398,7 @@ vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
@@ -1559,7 +1559,7 @@ vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_vec_def_for_operand: ");
@@ -1569,11 +1569,12 @@ vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
gcc_assert (is_simple_use);
- if (def_stmt && dump_enabled_p ())
- {
- dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
- dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
- }
+ if (def_stmt)
+ IF_VECT_DUMP
+ {
+ dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
+ dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
+ }
if (dt == vect_constant_def || dt == vect_external_def)
{
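
Where the dump guard was fused into a larger condition, as in the hunk above,
the rewrite splits it so that IF_VECT_DUMP guards only the dump code:
`if (def_stmt && dump_enabled_p ())' becomes `if (def_stmt)' followed by
IF_VECT_DUMP.  The two forms behave identically here because the dump block
is the entire body.  The shape of the rewrite, with COND standing in for the
non-dump half of the original condition:

  /* before */
  if (COND && dump_enabled_p ())
    {
      /* dump statements */
    }

  /* after */
  if (COND)
    IF_VECT_DUMP
      {
        /* dump statements */
      }

Since IF_VECT_DUMP presumably expands to a bare `if', nesting it as the sole
body of another `if' is only unambiguous when no `else' follows; none of the
rewritten sites has one.  The same split is applied to the overflow_p and
mask_type guards later in the patch.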
@@ -1750,7 +1751,7 @@ vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
@@ -1887,7 +1888,7 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
? !vect_load_lanes_supported (vectype, group_size, true)
: !vect_store_lanes_supported (vectype, group_size, true))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because the"
" target doesn't have an appropriate masked"
@@ -1911,7 +1912,7 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
TYPE_SIGN (offset_type),
gs_info->scale))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because the"
" target doesn't have an appropriate masked"
@@ -1929,7 +1930,7 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
{
/* Element X of the data must come from iteration i * VF + X of the
scalar loop. We need more work to support other mappings. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because an access"
" isn't contiguous.\n");
@@ -1943,7 +1944,7 @@ check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
GET_MODE_SIZE (vecmode)).exists (&mask_mode))
|| !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because the target"
" doesn't have the appropriate masked load or"
@@ -2009,7 +2010,7 @@ vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
if (TREE_CODE (step) != INTEGER_CST)
{
/* ??? Perhaps we could use range information here? */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"cannot truncate variable step.\n");
return false;
@@ -2075,10 +2076,11 @@ vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
return true;
}
- if (overflow_p && dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "truncating gather/scatter offset to %d bits"
- " might change its value.\n", element_bits);
+ if (overflow_p)
+ IF_VECT_DUMP
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "truncating gather/scatter offset to %d bits"
+ " might change its value.\n", element_bits);
return false;
}
@@ -2116,7 +2118,7 @@ vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
gs_info->offset = fold_convert (offset_type, gs_info->offset);
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"using gather/scatter for strided/grouped access,"
" scale = %d\n", gs_info->scale);
@@ -2254,7 +2256,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
overrun_p = false;
if (overrun_p && !can_overrun_p)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported\n");
return false;
@@ -2340,7 +2342,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
enum vect_def_type dt;
if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -2352,7 +2354,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
if (overrun_p)
{
gcc_assert (can_overrun_p);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Data access with gaps requires scalar "
"epilogue loop\n");
@@ -2377,7 +2379,7 @@ get_negative_load_store_type (gimple *stmt, tree vectype,
if (ncopies > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types with negative step.\n");
return VMAT_ELEMENTWISE;
@@ -2387,7 +2389,7 @@ get_negative_load_store_type (gimple *stmt, tree vectype,
if (alignment_support_scheme != dr_aligned
&& alignment_support_scheme != dr_unaligned_supported)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step but alignment required.\n");
return VMAT_ELEMENTWISE;
@@ -2395,7 +2397,7 @@ get_negative_load_store_type (gimple *stmt, tree vectype,
if (vls_type == VLS_STORE_INVARIANT)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"negative step with invariant source;"
" no permute needed.\n");
@@ -2404,7 +2406,7 @@ get_negative_load_store_type (gimple *stmt, tree vectype,
if (!perm_mask_for_reverse (vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step and reversing not supported.\n");
return VMAT_ELEMENTWISE;
@@ -2443,7 +2445,7 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
&gs_info->offset_dt,
&gs_info->offset_vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"%s index use not simple.\n",
vls_type == VLS_LOAD ? "gather" : "scatter");
@@ -2485,7 +2487,7 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
|| *memory_access_type == VMAT_STRIDED_SLP)
&& !nunits.is_constant ())
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Not using elementwise accesses due to variable "
"vectorization factor.\n");
@@ -2501,7 +2503,7 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
&& !DR_GROUP_NEXT_ELEMENT (stmt_info)
&& !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not falling back to elementwise accesses\n");
return false;
@@ -2521,7 +2523,7 @@ vect_check_load_store_mask (gimple *stmt, tree mask,
{
if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"mask argument is not a boolean.\n");
return false;
@@ -2529,7 +2531,7 @@ vect_check_load_store_mask (gimple *stmt, tree mask,
if (TREE_CODE (mask) != SSA_NAME)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"mask argument is not an SSA name.\n");
return false;
@@ -2542,7 +2544,7 @@ vect_check_load_store_mask (gimple *stmt, tree mask,
if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
&mask_vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"mask use not simple.\n");
return false;
@@ -2554,7 +2556,7 @@ vect_check_load_store_mask (gimple *stmt, tree mask,
if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"could not find an appropriate vector mask type.\n");
return false;
@@ -2563,7 +2565,7 @@ vect_check_load_store_mask (gimple *stmt, tree mask,
if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
TYPE_VECTOR_SUBPARTS (vectype)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector mask type ");
@@ -2594,7 +2596,7 @@ vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
native_encode_expr can handle it. */
if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot encode constant as a byte sequence.\n");
return false;
@@ -2607,7 +2609,7 @@ vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
&rhs_vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -2616,7 +2618,7 @@ vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"incompatible vector types.\n");
return false;
@@ -3202,7 +3204,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (rhs_type
&& !types_compatible_p (rhs_type, TREE_TYPE (op)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument types differ.\n");
return false;
@@ -3212,7 +3214,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -3223,7 +3225,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
else if (opvectype
&& opvectype != vectype_in)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument vector types differ.\n");
return false;
@@ -3237,7 +3239,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
gcc_assert (vectype_in);
if (!vectype_in)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
@@ -3263,7 +3265,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* We only handle functions that do not read or clobber memory. */
if (gimple_vuse (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"function reads from or writes to memory.\n");
return false;
@@ -3321,7 +3323,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vectype_in, dt, cost_vec);
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"function is not vectorizable.\n");
return false;
@@ -3353,7 +3355,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
/* Handle def. */
@@ -3799,7 +3801,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
&thisarginfo.vectype)
|| thisarginfo.dt == vect_uninitialized_def)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -3874,7 +3876,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
unsigned HOST_WIDE_INT vf;
if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not considering SIMD clones; not yet supported"
" for variable-width vectors.\n");
@@ -4024,7 +4026,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
/* Handle def. */
@@ -4644,7 +4646,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
|| (INTEGRAL_TYPE_P (rhs_type)
&& !type_has_mode_precision_p (rhs_type))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision unsupported."
"\n");
@@ -4654,7 +4656,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
/* Check the operands of the operation. */
if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -4674,7 +4676,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -4689,7 +4691,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (vectype_in);
if (!vectype_in)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
@@ -4703,7 +4705,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
&& !VECTOR_BOOLEAN_TYPE_P (vectype_in))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't convert between boolean and non "
@@ -4757,7 +4759,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
break;
/* FALLTHRU */
unsupported:
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conversion not supported by target.\n");
return false;
@@ -4881,7 +4883,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
}
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"transform conversion. ncopies = %d.\n", ncopies);
@@ -5227,7 +5229,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -5259,7 +5261,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
&& (!VECTOR_BOOLEAN_TYPE_P (vectype)
|| !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision "
"unsupported.\n");
@@ -5275,7 +5277,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
}
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
/* Handle def. */
@@ -5424,7 +5426,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
vectype_out = STMT_VINFO_VECTYPE (stmt_info);
if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision shifts not supported.\n");
return false;
@@ -5433,7 +5435,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
op0 = gimple_assign_rhs1 (stmt);
if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -5446,7 +5448,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (vectype);
if (!vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type\n");
return false;
@@ -5460,7 +5462,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
op1 = gimple_assign_rhs2 (stmt);
if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -5512,7 +5514,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"operand mode requires invariant argument.\n");
return false;
@@ -5522,7 +5524,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
if (!scalar_shift_arg)
{
optab = optab_for_tree_code (code, vectype, optab_vector);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.\n");
@@ -5531,7 +5533,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
if (op1_vectype == NULL_TREE
|| TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.\n");
@@ -5546,7 +5548,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
if (optab
&& optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vector/scalar shift/rotate found.\n");
}
@@ -5559,7 +5561,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
{
scalar_shift_arg = false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.\n");
@@ -5576,7 +5578,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
&& TYPE_MODE (TREE_TYPE (vectype))
!= TYPE_MODE (TREE_TYPE (op1)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.\n");
@@ -5596,7 +5598,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
/* Supportable by target? */
if (!optab)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.\n");
return false;
@@ -5605,7 +5607,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.\n");
/* Check only during analysis. */
@@ -5613,7 +5615,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
|| (!vec_stmt
&& !vect_worthwhile_without_simd_p (vinfo, code)))
return false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"proceeding using word mode.\n");
}
@@ -5623,7 +5625,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
&& !VECTOR_MODE_P (TYPE_MODE (vectype))
&& !vect_worthwhile_without_simd_p (vinfo, code))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.\n");
return false;
@@ -5639,7 +5641,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.\n");
@@ -5661,7 +5663,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
optab_op2_mode = insn_data[icode].operand[2].mode;
if (!VECTOR_MODE_P (optab_op2_mode))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"operand 1 using scalar mode.\n");
vec_oprnd1 = op1;
@@ -5792,7 +5794,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
op_type = TREE_CODE_LENGTH (code);
if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"num. args = %d (not unary/binary/ternary op).\n",
op_type);
@@ -5811,7 +5813,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
&& code != BIT_XOR_EXPR
&& code != BIT_AND_EXPR)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision arithmetic not supported.\n");
return false;
@@ -5820,7 +5822,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
op0 = gimple_assign_rhs1 (stmt);
if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -5838,7 +5840,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
{
if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not supported operation on bool value.\n");
return false;
@@ -5852,7 +5854,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
gcc_assert (vectype);
if (!vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
@@ -5874,7 +5876,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
op1 = gimple_assign_rhs2 (stmt);
if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -5885,7 +5887,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
op2 = gimple_assign_rhs3 (stmt);
if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.\n");
return false;
@@ -5917,7 +5919,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
optab = optab_for_tree_code (code, vectype, optab_default);
if (!optab)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.\n");
return false;
@@ -5928,14 +5930,14 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
if (!target_support_p)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.\n");
/* Check only during analysis. */
if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
return false;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"proceeding using word mode.\n");
}
@@ -5945,7 +5947,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
&& !vec_stmt
&& !vect_worthwhile_without_simd_p (vinfo, code))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.\n");
return false;
@@ -5961,7 +5963,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
/* Transform. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.\n");
@@ -6163,7 +6165,7 @@ get_group_alias_ptr_type (gimple *first_stmt)
if (get_alias_set (DR_REF (first_dr))
!= get_alias_set (DR_REF (next_dr)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"conflicting alias set types.\n");
return ptr_type_node;
@@ -6262,7 +6264,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (slp_node != NULL)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP of masked stores not supported.\n");
return false;
@@ -6308,7 +6310,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* FORNOW. This restriction should be relaxed. */
if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.\n");
return false;
@@ -6340,7 +6342,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
else if (memory_access_type != VMAT_LOAD_STORE_LANES
&& (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported access type for masked store.\n");
return false;
@@ -6582,7 +6584,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
else
ref_type = reference_alias_ptr_type (DR_REF (first_dr));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"transform store. ncopies = %d\n", ncopies);
@@ -7434,7 +7436,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (slp_node != NULL)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP of masked loads not supported.\n");
return false;
@@ -7478,7 +7480,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* FORNOW. This restriction should be relaxed. */
if (nested_in_vect_loop && ncopies > 1)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.\n");
return false;
@@ -7491,7 +7493,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
STMT_VINFO_MIN_NEG_DIST (stmt_info)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot perform implicit CSE when unrolling "
"with negative dependence distance\n");
@@ -7505,7 +7507,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
(e.g. - data copies). */
if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Aligned load, but unsupported type.\n");
return false;
@@ -7532,7 +7534,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
STMT_VINFO_MIN_NEG_DIST (stmt_info)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot perform implicit CSE when performing "
"group loads with negative dependence distance\n");
@@ -7547,7 +7549,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
!= STMT_SLP_TYPE (vinfo_for_stmt
(DR_GROUP_SAME_DR_STMT (stmt_info)))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conflicting SLP types for CSEd load\n");
return false;
@@ -7578,7 +7580,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
if (TREE_CODE (masktype) == INTEGER_TYPE)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"masked gather with integer mask not"
" supported.");
@@ -7588,7 +7590,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
else if (memory_access_type != VMAT_LOAD_STORE_LANES
&& memory_access_type != VMAT_GATHER_SCATTER)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported access type for masked load.\n");
return false;
@@ -7615,7 +7617,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
gcc_assert (memory_access_type
== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"transform load. ncopies = %d\n", ncopies);
@@ -8452,7 +8454,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& !nested_in_vect_loop
&& hoist_defs_of_uses (stmt, loop))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"hoisting out of the vectorized "
@@ -8714,7 +8716,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
/* FORNOW: not yet supported. */
if (STMT_VINFO_LIVE_P (stmt_info))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"value used after loop.\n");
return false;
@@ -9116,7 +9118,7 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
if (STMT_VINFO_LIVE_P (stmt_info))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"value used after loop.\n");
return false;
@@ -9378,7 +9380,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
gimple *pattern_stmt;
gimple_seq pattern_def_seq;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@@ -9386,7 +9388,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
if (gimple_has_volatile_ops (stmt))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: stmt has volatile operands\n");
@@ -9419,7 +9421,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
/* Analyze PATTERN_STMT instead of the original stmt. */
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (pattern_stmt);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
@@ -9428,7 +9430,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
}
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
return true;
@@ -9441,7 +9443,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
{
/* Analyze PATTERN_STMT too. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
@@ -9466,7 +9468,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
{
/* Analyze def stmt of STMT if it's a pattern stmt. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def statement: ");
@@ -9560,7 +9562,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
if (!ok)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant stmt not ");
@@ -9577,7 +9579,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
&& STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
&& !can_vectorize_live_stmts (stmt, NULL, node, NULL, cost_vec))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: live stmt not supported: ");
@@ -9696,7 +9698,7 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
default:
if (!STMT_VINFO_LIVE_P (stmt_info))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"stmt not supported.\n");
gcc_unreachable ();
@@ -9725,7 +9727,7 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
tree scalar_dest;
gimple *exit_phi;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"Record the vdef for outer-loop vectorization.\n");
@@ -10062,7 +10064,7 @@ vect_is_simple_use (tree operand, vec_info *vinfo,
*def_stmt = NULL;
*dt = vect_unknown_def_type;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_is_simple_use: operand ");
@@ -10084,7 +10086,7 @@ vect_is_simple_use (tree operand, vec_info *vinfo,
if (TREE_CODE (operand) != SSA_NAME)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not ssa-name.\n");
return false;
@@ -10097,7 +10099,7 @@ vect_is_simple_use (tree operand, vec_info *vinfo,
}
*def_stmt = SSA_NAME_DEF_STMT (operand);
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
@@ -10111,7 +10113,7 @@ vect_is_simple_use (tree operand, vec_info *vinfo,
*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
switch (*dt)
@@ -10148,7 +10150,7 @@ vect_is_simple_use (tree operand, vec_info *vinfo,
if (*dt == vect_unknown_def_type)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.\n");
return false;
@@ -10161,7 +10163,7 @@ vect_is_simple_use (tree operand, vec_info *vinfo,
case GIMPLE_CALL:
break;
default:
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt:\n");
return false;
@@ -10682,13 +10684,13 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
#pragma omp simd functions, and what vectorization factor
it really needs can't be determined until
vectorizable_simd_clone_call. */
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"defer to SIMD clone analysis.\n");
return true;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: irregular stmt.");
@@ -10699,7 +10701,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector stmt in loop:");
@@ -10734,14 +10736,14 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
scalar_type = TREE_TYPE (rhs1);
else
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"pure bool operation.\n");
return true;
}
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
@@ -10751,7 +10753,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type ");
@@ -10765,7 +10767,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
if (!*stmt_vectype_out)
*stmt_vectype_out = vectype;
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
@@ -10788,7 +10790,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
HOST_WIDE_INT dummy;
scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
@@ -10799,7 +10801,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
}
if (!nunits_vectype)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type ");
@@ -10812,7 +10814,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized vector "
@@ -10825,7 +10827,7 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
return false;
}
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
@@ -10860,7 +10862,7 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
if (!mask_type)
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported mask\n");
return NULL_TREE;
@@ -10878,7 +10880,7 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
if (!vect_is_simple_use (rhs, stmt_info->vinfo,
&def_stmt, &dt, &vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't compute mask type "
@@ -10900,7 +10902,7 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
TYPE_VECTOR_SUBPARTS (vectype)))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized masks "
@@ -10917,7 +10919,7 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
!= VECTOR_BOOLEAN_TYPE_P (vectype))
{
- if (dump_enabled_p ())
+ IF_VECT_DUMP
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: mixed mask and "
@@ -10944,12 +10946,13 @@ vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
/* No mask_type should mean loop invariant predicate.
This is probably a subject for optimization in if-conversion. */
- if (!mask_type && dump_enabled_p ())
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "not vectorized: can't compute mask type "
- "for statement, ");
- dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- }
+ if (!mask_type)
+ IF_VECT_DUMP
+ {
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: can't compute mask type "
+ "for statement, ");
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ }
return mask_type;
}
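
The remaining hunks modify vectorize_loops (presumably gcc/tree-vectorize.c,
given the function) rather than the statement-vectorization routines above,
hence the jump back to line 749 in the hunk headers.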
@@ -749,11 +749,12 @@ vectorize_loops (void)
loop_dist_alias_call = vect_loop_dist_alias_call (loop);
vectorize_epilogue:
vect_location = find_loop_location (loop);
- if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
- && dump_enabled_p ())
- dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
- LOCATION_FILE (vect_location),
- LOCATION_LINE (vect_location));
+ VECT_SCOPE ("analyzing loop");
+ if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION)
+ IF_VECT_DUMP
+ dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
+ LOCATION_FILE (vect_location.get_location_t ()),
+ LOCATION_LINE (vect_location.get_location_t ()));
loop_vinfo = vect_analyze_loop (loop, orig_loop_vinfo);
loop->aux = loop_vinfo;
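
Two changes ride along in the hunk above beyond the guard rewrite.  First,
vect_location is now an object rather than a raw location_t, so the uses
that need the underlying location (the LOCATION_LOCUS/FILE/LINE macros)
fetch it with get_location_t (); dump_printf_loc keeps taking vect_location
directly, both here and in the "loop vectorized" hunk below.  A sketch of
the assumed wrapper, with only the accessor this hunk relies on:

  /* Assumed shape of the new vect_location type; the class name and any
     other members are hypothetical.  location_t is GCC's source-location
     handle (from input.h).  */
  class dump_user_location_t
  {
  public:
    location_t get_location_t () const { return m_loc; }
  private:
    location_t m_loc;
  };

Second, VECT_SCOPE ("analyzing loop") presumably opens a named dump scope
covering the loop analysis, so nested dump messages can be grouped under it;
that macro, too, is defined elsewhere in the kit.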
@@ -827,10 +828,10 @@ vectorize_loops (void)
if (loop_vectorized_call)
set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
- if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
- && dump_enabled_p ())
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "loop vectorized\n");
+ if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION)
+ IF_VECT_DUMP
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "loop vectorized\n");
new_loop = vect_transform_loop (loop_vinfo);
num_vectorized_loops++;
/* Now that the loop has been vectorized, allow it to be unrolled
@@ -875,8 +876,7 @@ vectorize_loops (void)
vect_location = optinfo_location ();
statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
- if (dump_enabled_p ()
- || (num_vectorized_loops > 0 && dump_enabled_p ()))
+ IF_VECT_DUMP
dump_printf_loc (MSG_NOTE, vect_location,
"vectorized %u loops in function.\n",
num_vectorized_loops);
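
The deleted condition in the final hunk was redundant as well as open-coded:
`dump_enabled_p () || (num_vectorized_loops > 0 && dump_enabled_p ())' has
the form A || (B && A), which absorbs to plain A, so collapsing it to
IF_VECT_DUMP changes nothing observable.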