
Fix PR57077 (issue8840045)

Message ID: 20130426185232.D132D808B4@tjsboxrox.mtv.corp.google.com
State: New

Commit Message

Teresa Johnson April 26, 2013, 6:52 p.m. UTC
This patch fixes PR57077. Certain new uses of apply_probability
are actually scaling the counts up, and the scale factor should not 
be treated as a probability as the value may exceed REG_BR_PROB_BASE.
One example (from the PR) is when scaling counts up in LTO when merging
profiles. Another example I found when preparing the patch to use
the rounding divide in more places is when inlining COMDAT functions.
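
For concreteness (a hypothetical up-scaling; REG_BR_PROB_BASE is 10000 and RDIV
is the rounding divide from basic-block.h): scaling counts up by 2.5x gives
scale = GCOV_COMPUTE_SCALE (5, 2) = RDIV (5 * 10000, 2) = 25000, which exceeds
REG_BR_PROB_BASE and so trips the probability range check, even though applying
it to a count of 600 yields the perfectly sensible 1500.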

Add new helper function apply_scale that does the scaling without
the probability range check. I audited the new uses of apply_probability
and changed the calls as appropriate.
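
As a quick sanity check, here is a minimal standalone sketch of the new
arithmetic (not GCC source; REG_BR_PROB_BASE's value of 10000 and the RDIV
rounding divide mirror basic-block.h, and the runs/counts are made up):

#include <stdio.h>

#define REG_BR_PROB_BASE 10000
#define RDIV(X, Y) (((X) + (Y) / 2) / (Y))
#define GCOV_COMPUTE_SCALE(num, den) \
  ((den) ? RDIV ((num) * REG_BR_PROB_BASE, (den)) : REG_BR_PROB_BASE)

typedef long long gcov_type;

/* Same body as the new apply_scale: no range check on SCALE.  */
static gcov_type
apply_scale (gcov_type freq, int scale)
{
  return RDIV (freq * scale, REG_BR_PROB_BASE);
}

int
main (void)
{
  /* Counts collected over 100 runs scaled up to 300 runs.  */
  int scale = GCOV_COMPUTE_SCALE ((gcov_type) 300, (gcov_type) 100);
  gcov_type count = apply_scale (600, scale);
  printf ("scale = %d, scaled count = %lld\n", scale, count);
  return 0;
}

This prints "scale = 30000, scaled count = 1800", i.e. the factor is three
times REG_BR_PROB_BASE yet the scaled count comes out as intended.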

Profile-bootstrapped and tested on x86_64-unknown-linux-gnu. Verified that this
fixes the lto-bootstrap issue. Ok for trunk?

2013-04-26  Teresa Johnson  <tejohnson@google.com>

	* basic-block.h (apply_scale): New function.
	(apply_probability): Use apply_scale.
	* gimple-streamer-in.c (input_bb): Ditto.
	* lto-streamer-in.c (input_cfg): Ditto.
	* lto-cgraph.c (merge_profile_summaries): Ditto.
	* tree-optimize.c (execute_fixup_cfg): Ditto.
	* tree-inline.c (copy_bb): Update comment to use apply_scale.
	(copy_edges_for_bb): Ditto.
	(copy_cfg_body): Ditto.


--
This patch is available for review at http://codereview.appspot.com/8840045

Comments

Richard Biener April 29, 2013, 7:58 a.m. UTC | #1
On Fri, Apr 26, 2013 at 8:52 PM, Teresa Johnson <tejohnson@google.com> wrote:
> This patch fixes PR57077. Certain new uses of apply_probability
> are actually scaling the counts up, and the scale factor should not
> be treated as a probability as the value may exceed REG_BR_PROB_BASE.
> One example (from the PR) is when scaling counts up in LTO when merging
> profiles. Another example I found when preparing the patch to use
> the rounding divide in more places is when inlining COMDAT functions.
>
> Add new helper function apply_scale that does the scaling without
> the probability range check. I audited the new uses of apply_probability
> and changed the calls as appropriate.
>
> Profile-bootstrapped and tested on x86_64-unknown-linux-gnu. Verified that this
> fixes the lto-bootstrap issue. Ok for trunk?

Ok.

Thanks,
Richard.


Patch

Index: gimple-streamer-in.c
===================================================================
--- gimple-streamer-in.c	(revision 198344)
+++ gimple-streamer-in.c	(working copy)
@@ -329,8 +329,8 @@  input_bb (struct lto_input_block *ib, enum LTO_tag
   index = streamer_read_uhwi (ib);
   bb = BASIC_BLOCK_FOR_FUNCTION (fn, index);
 
-  bb->count = apply_probability (streamer_read_gcov_count (ib),
-                                 count_materialization_scale);
+  bb->count = apply_scale (streamer_read_gcov_count (ib),
+                           count_materialization_scale);
   bb->frequency = streamer_read_hwi (ib);
   bb->flags = streamer_read_hwi (ib);
 
Index: lto-streamer-in.c
===================================================================
--- lto-streamer-in.c	(revision 198344)
+++ lto-streamer-in.c	(working copy)
@@ -635,8 +635,8 @@  input_cfg (struct lto_input_block *ib, struct func
 
 	  dest_index = streamer_read_uhwi (ib);
 	  probability = (int) streamer_read_hwi (ib);
-	  count = apply_probability ((gcov_type) streamer_read_gcov_count (ib),
-                                     count_materialization_scale);
+	  count = apply_scale ((gcov_type) streamer_read_gcov_count (ib),
+                               count_materialization_scale);
 	  edge_flags = streamer_read_uhwi (ib);
 
 	  dest = BASIC_BLOCK_FOR_FUNCTION (fn, dest_index);
Index: tree-inline.c
===================================================================
--- tree-inline.c	(revision 198344)
+++ tree-inline.c	(working copy)
@@ -1519,7 +1519,7 @@  copy_bb (copy_body_data *id, basic_block bb, int f
      basic_block_info automatically.  */
   copy_basic_block = create_basic_block (NULL, (void *) 0,
                                          (basic_block) prev->aux);
-  /* Update to use apply_probability().  */
+  /* Update to use apply_scale().  */
   copy_basic_block->count = bb->count * count_scale / REG_BR_PROB_BASE;
 
   /* We are going to rebuild frequencies from scratch.  These values
@@ -1891,7 +1891,7 @@  copy_edges_for_bb (basic_block bb, gcov_type count
 	    && old_edge->dest->aux != EXIT_BLOCK_PTR)
 	  flags |= EDGE_FALLTHRU;
 	new_edge = make_edge (new_bb, (basic_block) old_edge->dest->aux, flags);
-        /* Update to use apply_probability().  */
+        /* Update to use apply_scale().  */
 	new_edge->count = old_edge->count * count_scale / REG_BR_PROB_BASE;
 	new_edge->probability = old_edge->probability;
       }
@@ -2278,7 +2278,7 @@  copy_cfg_body (copy_body_data * id, gcov_type coun
 	    incoming_frequency += EDGE_FREQUENCY (e);
 	    incoming_count += e->count;
 	  }
-      /* Update to use apply_probability().  */
+      /* Update to use apply_scale().  */
       incoming_count = incoming_count * count_scale / REG_BR_PROB_BASE;
       /* Update to use EDGE_FREQUENCY.  */
       incoming_frequency
Index: tree-optimize.c
===================================================================
--- tree-optimize.c	(revision 198344)
+++ tree-optimize.c	(working copy)
@@ -131,15 +131,15 @@  execute_fixup_cfg (void)
                             ENTRY_BLOCK_PTR->count);
 
   ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count;
-  EXIT_BLOCK_PTR->count = apply_probability (EXIT_BLOCK_PTR->count,
-                                             count_scale);
+  EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count,
+                                       count_scale);
 
   FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
-    e->count = apply_probability (e->count, count_scale);
+    e->count = apply_scale (e->count, count_scale);
 
   FOR_EACH_BB (bb)
     {
-      bb->count = apply_probability (bb->count, count_scale);
+      bb->count = apply_scale (bb->count, count_scale);
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
 	  gimple stmt = gsi_stmt (gsi);
@@ -172,7 +172,7 @@  execute_fixup_cfg (void)
 	}
 
       FOR_EACH_EDGE (e, ei, bb->succs)
-        e->count = apply_probability (e->count, count_scale);
+        e->count = apply_scale (e->count, count_scale);
 
       /* If we have a basic block with no successors that does not
 	 end with a control statement or a noreturn call end it with
Index: lto-cgraph.c
===================================================================
--- lto-cgraph.c	(revision 198344)
+++ lto-cgraph.c	(working copy)
@@ -1347,10 +1347,10 @@  merge_profile_summaries (struct lto_file_decl_data
                                         file_data->profile_info.runs);
 	lto_gcov_summary.sum_max
             = MAX (lto_gcov_summary.sum_max,
-                   apply_probability (file_data->profile_info.sum_max, scale));
+                   apply_scale (file_data->profile_info.sum_max, scale));
 	lto_gcov_summary.sum_all
             = MAX (lto_gcov_summary.sum_all,
-                   apply_probability (file_data->profile_info.sum_all, scale));
+                   apply_scale (file_data->profile_info.sum_all, scale));
         /* Save a pointer to the profile_info with the largest
            scaled sum_all and the scale for use in merging the
            histogram.  */
@@ -1372,8 +1372,8 @@  merge_profile_summaries (struct lto_file_decl_data
       /* Scale up the min value as we did the corresponding sum_all
          above. Use that to find the new histogram index.  */
       gcov_type scaled_min
-          = apply_probability (saved_profile_info->histogram[h_ix].min_value,
-                               saved_scale);
+          = apply_scale (saved_profile_info->histogram[h_ix].min_value,
+                         saved_scale);
       /* The new index may be shared with another scaled histogram entry,
          so we need to account for a non-zero histogram entry at new_ix.  */
       unsigned new_ix = gcov_histo_index (scaled_min);
@@ -1386,8 +1386,8 @@  merge_profile_summaries (struct lto_file_decl_data
          here and place the scaled cumulative counter value in the bucket
          corresponding to the scaled minimum counter value.  */
       lto_gcov_summary.histogram[new_ix].cum_value
-          += apply_probability (saved_profile_info->histogram[h_ix].cum_value,
-                                saved_scale);
+          += apply_scale (saved_profile_info->histogram[h_ix].cum_value,
+                          saved_scale);
       lto_gcov_summary.histogram[new_ix].num_counters
           += saved_profile_info->histogram[h_ix].num_counters;
     }
@@ -1419,8 +1419,8 @@  merge_profile_summaries (struct lto_file_decl_data
 	if (scale == REG_BR_PROB_BASE)
 	  continue;
 	for (edge = node->callees; edge; edge = edge->next_callee)
-	  edge->count = apply_probability (edge->count, scale);
-	node->count = apply_probability (node->count, scale);
+	  edge->count = apply_scale (edge->count, scale);
+	node->count = apply_scale (node->count, scale);
       }
 }
 
Index: basic-block.h
===================================================================
--- basic-block.h	(revision 198344)
+++ basic-block.h	(working copy)
@@ -500,7 +500,7 @@  struct edge_list
 					      REG_BR_PROB_BASE)
 
 /* Compute a scale factor (or probability) suitable for scaling of
-   gcov_type values via apply_probability().  */
+   gcov_type values via apply_probability() and apply_scale().  */
 #define GCOV_COMPUTE_SCALE(num,den) \
   ((den) ? RDIV ((num) * REG_BR_PROB_BASE, (den)) : REG_BR_PROB_BASE)
 
@@ -952,13 +952,23 @@  combine_probabilities (int prob1, int prob2)
   return RDIV (prob1 * prob2, REG_BR_PROB_BASE);
 }
 
+/* Apply scale factor SCALE on frequency or count FREQ. Use this
+   interface when potentially scaling up, so that SCALE is not
+   constrained to be < REG_BR_PROB_BASE.  */
+
+static inline gcov_type
+apply_scale (gcov_type freq, int scale)
+{
+  return RDIV (freq * scale, REG_BR_PROB_BASE);
+}
+
 /* Apply probability PROB on frequency or count FREQ.  */
 
 static inline gcov_type
 apply_probability (gcov_type freq, int prob)
 {
   check_probability (prob);
-  return RDIV (freq * prob, REG_BR_PROB_BASE);
+  return apply_scale (freq, prob);
 }
 
 /* Return inverse probability for PROB.  */
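
For readers of the basic-block.h hunk above, a standalone model of the
relationship the patch sets up (not GCC source; check_probability is modelled
here as a plain assert, and its exact form in GCC is assumed rather than
quoted):

#include <assert.h>

#define REG_BR_PROB_BASE 10000
#define RDIV(X, Y) (((X) + (Y) / 2) / (Y))

typedef long long gcov_type;

static void
check_probability (int prob)
{
  assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
}

/* apply_scale: scaling with no bound on SCALE, as in the patch.  */
static gcov_type
apply_scale (gcov_type freq, int scale)
{
  return RDIV (freq * scale, REG_BR_PROB_BASE);
}

/* apply_probability: now just apply_scale behind a range check.  */
static gcov_type
apply_probability (gcov_type freq, int prob)
{
  check_probability (prob);
  return apply_scale (freq, prob);
}

int
main (void)
{
  /* For a genuine probability the two interfaces agree ...  */
  assert (apply_probability (600, 5000) == apply_scale (600, 5000));
  /* ... but only apply_scale accepts a factor above REG_BR_PROB_BASE;
     apply_probability (600, 30000) would trip check_probability.  */
  assert (apply_scale (600, 30000) == 1800);
  return 0;
}

So callers that scale counts by a GCOV_COMPUTE_SCALE factor (which can exceed
REG_BR_PROB_BASE when scaling up) use apply_scale, while callers passing a
true branch probability keep the stricter apply_probability.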