Patchwork fix spelling in comments

login
register
mail settings
Submitter Nathan Froyd
Date Feb. 11, 2011, 2:56 p.m.
Message ID <20110211145631.GK6247@codesourcery.com>
Download mbox | patch
Permalink /patch/82762/
State New
Headers show

Comments

Nathan Froyd - Feb. 11, 2011, 2:56 p.m.
Trivial cleanups.  Bootstrapped on x86_64-unknown-linux-gnu.

-Nathan

	* cgraph.c: Fix comment typos.
	* cgraph.h: Likewise.
	* cgraphunit.c: Likewise.
	* ipa-cp.c: Likewise.
	* ipa-inline.c: Likewise.
	* ipa-prop.c: Likewise.
	* ipa-pure-const.c: Likewise.
	* ipa-ref.c: Likewise.
	* ipa-reference.c: Likewise.
Nathan Froyd - Feb. 11, 2011, 3:16 p.m.
On Fri, Feb 11, 2011 at 06:56:31AM -0800, Nathan Froyd wrote:
> Trivial cleanups.  Bootstrapped on x86_64-unknown-linux-gnu.
>  
> -/* List of hooks trigerred on cgraph_node events.  */
> +/* List of hooks trig-erred on cgraph_node events.  */

...and committed with fixing the spelling errors in my spelling
corrections.

-Nathan

Patch

diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 6176b4d..1f7c905 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -164,28 +164,28 @@  static GTY(()) struct cgraph_asm_node *cgraph_asm_last_node;
    them, to support -fno-toplevel-reorder.  */
 int cgraph_order;
 
-/* List of hooks trigerred on cgraph_edge events.  */
+/* List of hooks triggered on cgraph_edge events.  */
 struct cgraph_edge_hook_list {
   cgraph_edge_hook hook;
   void *data;
   struct cgraph_edge_hook_list *next;
 };
 
-/* List of hooks trigerred on cgraph_node events.  */
+/* List of hooks triggered on cgraph_node events.  */
 struct cgraph_node_hook_list {
   cgraph_node_hook hook;
   void *data;
   struct cgraph_node_hook_list *next;
 };
 
-/* List of hooks trigerred on events involving two cgraph_edges.  */
+/* List of hooks triggered on events involving two cgraph_edges.  */
 struct cgraph_2edge_hook_list {
   cgraph_2edge_hook hook;
   void *data;
   struct cgraph_2edge_hook_list *next;
 };
 
-/* List of hooks trigerred on events involving two cgraph_nodes.  */
+/* List of hooks triggered on events involving two cgraph_nodes.  */
 struct cgraph_2node_hook_list {
   cgraph_2node_hook hook;
   void *data;
@@ -588,7 +588,7 @@  cgraph_same_body_alias (tree alias, tree decl)
 }
 
 /* Add thunk alias into callgraph.  The alias declaration is ALIAS and it
-   alises DECL with an adjustments made into the first parameter.
+   aliases DECL with adjustments made into the first parameter.
    See comments in thunk_adjust for detail on the parameters.  */
 
 struct cgraph_node *
@@ -937,7 +937,7 @@  cgraph_create_edge_including_clones (struct cgraph_node *orig,
         /* It is possible that clones already contain the edge while
 	   master didn't.  Either we promoted indirect call into direct
 	   call in the clone or we are processing clones of unreachable
-	   master where edges has been rmeoved.  */
+	   master where edges has been removed.  */
 	if (edge)
 	  cgraph_set_call_stmt (edge, stmt);
 	else if (!cgraph_edge (node, stmt))
@@ -997,7 +997,7 @@  cgraph_create_edge_1 (struct cgraph_node *caller, struct cgraph_node *callee,
      have not been loaded yet.  */
   if (call_stmt)
     {
-      /* This is a rather expensive check possibly trigerring
+      /* This is a rather expensive check possibly triggering
 	 construction of call stmt hashtable.  */
       gcc_checking_assert (!cgraph_edge (caller, call_stmt));
 
@@ -1252,7 +1252,7 @@  cgraph_update_edges_for_call_stmt_node (struct cgraph_node *node,
   if (!new_call && !old_call)
     return;
   /* See if we turned indirect call into direct call or folded call to one builtin
-     into different bultin.  */
+     into different builtin.  */
   if (old_call != new_call)
     {
       struct cgraph_edge *e = cgraph_edge (node, old_stmt);
@@ -1973,7 +1973,7 @@  dump_cgraph_node (FILE *f, struct cgraph_node *node)
           fprintf (f, " %s/%i", cgraph_node_name (n), n->uid);
 	  if (n->thunk.thunk_p)
 	    {
-	      fprintf (f, " (thunk of %s fixed ofset %i virtual value %i has "
+	      fprintf (f, " (thunk of %s fixed offset %i virtual value %i has "
 		       "virtual offset %i",
 	      	       lang_hooks.decl_printable_name (n->thunk.alias, 2),
 		       (int)n->thunk.fixed_offset,
@@ -2411,8 +2411,8 @@  cgraph_function_body_availability (struct cgraph_node *node)
     avail = AVAIL_LOCAL;
   else if (!node->local.externally_visible)
     avail = AVAIL_AVAILABLE;
-  /* Inline functions are safe to be analyzed even if their sybol can
-     be overwritten at runtime.  It is not meaningful to enfore any sane
+  /* Inline functions are safe to be analyzed even if their symbol can
+     be overwritten at runtime.  It is not meaningful to enforce any sane
      behaviour on replacing inline function by different body.  */
   else if (DECL_DECLARED_INLINE_P (node->decl))
     avail = AVAIL_AVAILABLE;
@@ -2706,9 +2706,9 @@  cgraph_propagate_frequency (struct cgraph_node *node)
       if (edge->caller != node)
 	{
           only_called_at_startup &= edge->caller->only_called_at_startup;
-	  /* It makes snese to put main() together with the static constructors.
+	  /* It makes sense to put main() together with the static constructors.
 	     It will be executed for sure, but rest of functions called from
-	     main are definitly not at startup only.  */
+	     main are definitely not at startup only.  */
 	  if (MAIN_NAME_P (DECL_NAME (edge->caller->decl)))
 	    only_called_at_startup = 0;
           only_called_at_exit &= edge->caller->only_called_at_exit;
@@ -2843,7 +2843,7 @@  cgraph_can_remove_if_no_direct_calls_and_refs_p (struct cgraph_node *node)
   return true;
 }
 
-/* Return true when function NODE can be excpected to be removed
+/* Return true when function NODE can be expected to be removed
    from program when direct calls in this compilation unit are removed.
 
    As a special case COMDAT functions are
@@ -2852,7 +2852,7 @@  cgraph_can_remove_if_no_direct_calls_and_refs_p (struct cgraph_node *node)
    unit)
 
    This function behaves as cgraph_only_called_directly_p because eliminating
-   all uses of COMDAT function does not make it neccesarily disappear from
+   all uses of COMDAT function does not make it necessarily disappear from
    the program unless we are compiling whole program or we do LTO.  In this
    case we know we win since dynamic linking will not really discard the
    linkonce section.  */
@@ -2874,7 +2874,7 @@  cgraph_will_be_removed_from_program_if_no_direct_calls (struct cgraph_node *node
 }
 
 /* Return true when RESOLUTION indicate that linker will use
-   the symbol from non-LTo object files.  */
+   the symbol from non-LTO object files.  */
 
 bool
 resolution_used_from_other_file_p (enum ld_plugin_symbol_resolution resolution)
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index 2df3fa5..90ee128 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -943,7 +943,7 @@  varpool_can_remove_if_no_refs (struct varpool_node *node)
 /* Return true when all references to VNODE must be visible in ipa_ref_list.
    i.e. if the variable is not externally visible or not used in some magic
    way (asm statement or such).
-   The magic uses are all sumarized in force_output flag.  */
+   The magic uses are all summarized in force_output flag.  */
 
 static inline bool
 varpool_all_refs_explicit_p (struct varpool_node *vnode)
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index f6fe272..cc19be7 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -364,7 +364,7 @@  cgraph_finalize_function (tree decl, bool nested)
       || DECL_STATIC_CONSTRUCTOR (decl)
       || DECL_STATIC_DESTRUCTOR (decl)
       /* COMDAT virtual functions may be referenced by vtable from
-	 other compilatoin unit.  Still we want to devirtualize calls
+	 other compilation unit.  Still we want to devirtualize calls
 	 to those so we need to analyze them.
 	 FIXME: We should introduce may edges for this purpose and update
 	 their handling in unreachable function removal and inliner too.  */
@@ -431,7 +431,7 @@  verify_edge_count_and_frequency (struct cgraph_edge *e)
 	  != compute_call_stmt_bb_frequency (e->caller->decl,
 					     gimple_bb (e->call_stmt))))
     {
-      error ("caller edge frequency %i does not match BB freqency %i",
+      error ("caller edge frequency %i does not match BB frequency %i",
 	     e->frequency,
 	     compute_call_stmt_bb_frequency (e->caller->decl,
 					     gimple_bb (e->call_stmt)));
@@ -1550,7 +1550,7 @@  cgraph_expand_function (struct cgraph_node *node)
       	   alias && alias->next; alias = alias->next)
         ;
       /* Walk aliases in the order they were created; it is possible that
-         thunks reffers to the aliases made earlier.  */
+         thunks refer to the aliases made earlier.  */
       for (; alias; alias = next)
         {
 	  next = alias->previous;
@@ -2324,7 +2324,7 @@  cgraph_materialize_all_clones (void)
 	        {
 		  if (cgraph_dump_file)
 		    {
-		      fprintf (cgraph_dump_file, "clonning %s to %s\n",
+		      fprintf (cgraph_dump_file, "cloning %s to %s\n",
 			       cgraph_node_name (node->clone_of),
 			       cgraph_node_name (node));
 		      if (node->clone.tree_map)
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index b06238d..0ef640b 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -50,7 +50,7 @@  along with GCC; see the file COPYING3.  If not see
    with the value 3.
 
    The algorithm used is based on "Interprocedural Constant Propagation", by
-   Challahan David, Keith D Cooper, Ken Kennedy, Linda Torczon, Comp86, pg
+   David Callahan, Keith D Cooper, Ken Kennedy, Linda Torczon, Comp86, pg
    152-161
 
    The optimization is divided into three stages:
@@ -469,7 +469,7 @@  ipcp_cloning_candidate_p (struct cgraph_node *node)
   if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
     {
       if (dump_file)
-        fprintf (dump_file, "Not considering %s for cloning; body is overwrittable.\n",
+        fprintf (dump_file, "Not considering %s for cloning; body is overwritable.\n",
  	         cgraph_node_name (node));
       return false;
     }
@@ -521,7 +521,7 @@  ipcp_cloning_candidate_p (struct cgraph_node *node)
 
   /* When profile is available and function is hot, propagate into it even if
      calls seems cold; constant propagation can improve function's speed
-     significandly.  */
+     significantly.  */
   if (max_count)
     {
       if (direct_call_sum > node->count * 90 / 100)
@@ -614,8 +614,8 @@  build_const_val (struct ipcp_lattice *lat, tree tree_type)
    FIXME: This code is wrong.  Since the callers can be also clones and
    the clones are not scaled yet, the sums gets unrealistically high.
    To properly compute the counts, we would need to do propagation across
-   callgraph (as external call to A might imply call to non-clonned B
-   if A's clone calls clonned B).  */
+   callgraph (as external call to A might imply call to non-cloned B
+   if A's clone calls cloned B).  */
 static void
 ipcp_compute_node_scale (struct cgraph_node *node)
 {
@@ -1127,7 +1127,7 @@  ipcp_estimate_growth (struct cgraph_node *node)
     else
       need_original = true;
 
-  /* If we will be able to fully replace orignal node, we never increase
+  /* If we will be able to fully replace original node, we never increase
      program size.  */
   if (!need_original)
     return 0;
@@ -1148,7 +1148,7 @@  ipcp_estimate_growth (struct cgraph_node *node)
       }
 
   /* We make just very simple estimate of savings for removal of operand from
-     call site.  Precise cost is dificult to get, as our size metric counts
+     call site.  Precise cost is difficult to get, as our size metric counts
      constants and moves as free.  Generally we are looking for cases that
      small function is called very many times.  */
   growth = node->local.inline_summary.self_size
@@ -1380,7 +1380,7 @@  ipcp_insert_stage (void)
 
       new_size += growth;
 
-      /* Look if original function becomes dead after clonning.  */
+      /* Look if original function becomes dead after cloning.  */
       for (cs = node->callers; cs != NULL; cs = cs->next_caller)
 	if (cs->caller == node || ipcp_need_redirect_p (cs))
 	  break;
@@ -1555,7 +1555,7 @@  static bool
 cgraph_gate_cp (void)
 {
   /* FIXME: We should remove the optimize check after we ensure we never run
-     IPA passes when not optimizng.  */
+     IPA passes when not optimizing.  */
   return flag_ipa_cp && optimize;
 }
 
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 8087c81..a415aa7 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -333,7 +333,7 @@  cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
   ncalls_inlined++;
 
   /* FIXME: We should remove the optimize check after we ensure we never run
-     IPA passes when not optimizng.  */
+     IPA passes when not optimizing.  */
   if (flag_indirect_inlining && optimize)
     return ipa_propagate_indirect_call_infos (curr, new_edges);
   else
@@ -528,7 +528,7 @@  cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
 
   if (dump)
     {
-      fprintf (dump_file, "    Badness calculcation for %s -> %s\n",
+      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
 	       cgraph_node_name (edge->caller),
 	       cgraph_node_name (edge->callee));
       fprintf (dump_file, "      growth %i, time %i-%i, size %i-%i\n",
@@ -653,7 +653,7 @@  update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
       /* fibheap_replace_key only decrease the keys.
 	 When we increase the key we do not update heap
 	 and instead re-insert the element once it becomes
-	 a minium of heap.  */
+	 a minimum of heap.  */
       if (badness < n->key)
 	{
 	  fibheap_replace_key (heap, n, badness);
@@ -1006,7 +1006,7 @@  add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
    All inline candidates are put into prioritized heap based on estimated
    growth of the overall number of instructions and then update the estimates.
 
-   INLINED and INLINED_CALEES are just pointers to arrays large enough
+   INLINED and INLINED_CALLEES are just pointers to arrays large enough
    to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */
 
 static void
@@ -1118,7 +1118,7 @@  cgraph_decide_inlining_of_small_functions (void)
 	 ??? When the frequencies are taken into account we might not need this
 	 restriction.
 
-	 We need to be cureful here, in some testcases, e.g. directivec.c in
+	 We need to be careful here, in some testcases, e.g. directives.c in
 	 libcpp, we can estimate self recursive function to have negative growth
 	 for inlining completely.
 	 */
@@ -1821,7 +1821,7 @@  struct gimple_opt_pass pass_early_inline =
    0 - means not eliminated
    1 - half of statements goes away
    2 - for sure it is eliminated.
-   We are not terribly sophisficated, basically looking for simple abstraction
+   We are not terribly sophisticated, basically looking for simple abstraction
    penalty wrappers.  */
 
 static int
@@ -2005,7 +2005,7 @@  compute_inline_parameters (struct cgraph_node *node)
     {
       struct cgraph_edge *e;
 
-      /* Functions calling builtlin_apply can not change signature.  */
+      /* Functions calling builtin_apply can not change signature.  */
       for (e = node->callees; e; e = e->next_callee)
 	if (DECL_BUILT_IN (e->callee->decl)
 	    && DECL_BUILT_IN_CLASS (e->callee->decl) == BUILT_IN_NORMAL
@@ -2052,7 +2052,7 @@  struct gimple_opt_pass pass_inline_parameters =
  }
 };
 
-/* This function performs intraprocedural analyzis in NODE that is required to
+/* This function performs intraprocedural analysis in NODE that is required to
    inline indirect calls.  */
 static void
 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
@@ -2074,7 +2074,7 @@  analyze_function (struct cgraph_node *node)
 
   compute_inline_parameters (node);
   /* FIXME: We should remove the optimize check after we ensure we never run
-     IPA passes when not optimizng.  */
+     IPA passes when not optimizing.  */
   if (flag_indirect_inlining && optimize)
     inline_indirect_intraprocedural_analysis (node);
 
@@ -2120,7 +2120,7 @@  inline_transform (struct cgraph_node *node)
   struct cgraph_edge *e;
   bool inline_p = false;
 
-  /* FIXME: Currently the passmanager is adding inline transform more than once to some
+  /* FIXME: Currently the pass manager is adding inline transform more than once to some
      clones.  This needs revisiting after WPA cleanups.  */
   if (cfun->after_inlining)
     return 0;
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 1970881..82599f1 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -416,7 +416,7 @@  stmt_may_be_vtbl_ptr_store (gimple stmt)
   return true;
 }
 
-/* Callbeck of walk_aliased_vdefs and a helper function for
+/* Callback of walk_aliased_vdefs and a helper function for
    detect_type_change to check whether a particular statement may modify
    the virtual table pointer, and if possible also determine the new type of
    the (sub-)object.  It stores its result into DATA, which points to a
@@ -679,7 +679,7 @@  compute_complex_ancestor_jump_func (struct ipa_node_params *info,
     }
 }
 
-/* Given OP whch is passed as an actual argument to a called function,
+/* Given OP which is passed as an actual argument to a called function,
    determine if it is possible to construct a KNOWN_TYPE jump function for it
    and if so, create one and store it to JFUNC.  */
 
@@ -899,7 +899,7 @@  fill_member_ptr_cst_jump_function (struct ipa_jump_func *jfunc,
   jfunc->value.member_cst.delta = delta;
 }
 
-/* If RHS is an SSA_NAMe and it is defined by a simple copy assign statement,
+/* If RHS is an SSA_NAME and it is defined by a simple copy assign statement,
    return the rhs of its defining statement.  */
 
 static inline tree
@@ -1516,7 +1516,7 @@  ipa_analyze_params_uses (struct cgraph_node *node,
 }
 
 /* Initialize the array describing properties of of formal parameters of NODE,
-   analyze their uses and and compute jump functions associated witu actual
+   analyze their uses and compute jump functions associated with actual
    arguments of calls from within NODE.  */
 
 void
@@ -1546,7 +1546,7 @@  ipa_analyze_node (struct cgraph_node *node)
 }
 
 
-/* Update the jump function DST when the call graph edge correspondng to SRC is
+/* Update the jump function DST when the call graph edge corresponding to SRC is
    is being inlined, knowing that DST is of type ancestor and src of known
    type.  */
 
@@ -1704,7 +1704,7 @@  try_make_edge_direct_simple_call (struct cgraph_edge *ie,
 }
 
 /* Try to find a destination for indirect edge IE that corresponds to a
-   virtuall call based on a formal parameter which is described by jump
+   virtual call based on a formal parameter which is described by jump
    function JFUNC and if it can be determined, make it direct and return the
    direct edge.  Otherwise, return NULL.  */
 
@@ -2054,7 +2054,7 @@  ipa_unregister_cgraph_hooks (void)
   node_duplication_hook_holder = NULL;
 }
 
-/* Allocate all necessary data strucutures necessary for indirect inlining.  */
+/* Allocate all necessary data structures necessary for indirect inlining.  */
 
 void
 ipa_create_all_structures_for_iinln (void)
@@ -2783,7 +2783,7 @@  ipa_write_node_info (struct output_block *ob, struct cgraph_node *node)
     ipa_write_indirect_edge_info (ob, e);
 }
 
-/* Srtream in NODE info from IB.  */
+/* Stream in NODE info from IB.  */
 
 static void
 ipa_read_node_info (struct lto_input_block *ib, struct cgraph_node *node,
@@ -2919,7 +2919,7 @@  ipa_prop_read_jump_functions (void)
 }
 
 /* After merging units, we can get mismatch in argument counts.
-   Also decl merging might've rendered parameter lists obsolette.
+   Also decl merging might've rendered parameter lists obsolete.
    Also compute called_with_variable_arg info.  */
 
 void
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index 101e8c9..3b4cc02 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -423,7 +423,7 @@  worse_state (enum pure_const_state_e *state, bool *looping,
   *looping = MAX (*looping, looping2);
 }
 
-/* Recognize special cases of builtins that are by themself not pure or const
+/* Recognize special cases of builtins that are by themselves not pure or const
    but function using them is.  */
 static bool
 special_builtin_state (enum pure_const_state_e *state, bool *looping,
@@ -547,7 +547,7 @@  check_call (funct_state local, gimple call, bool ipa)
         fprintf (dump_file, "    Recursive call can loop.\n");
       local->looping = true;
     }
-  /* Either calle is unknown or we are doing local analysis.
+  /* Either callee is unknown or we are doing local analysis.
      Look to see if there are any bits available for the callee (such as by
      declaration or because it is builtin) and process solely on the basis of
      those bits. */
@@ -771,7 +771,7 @@  end:
       if (mark_dfs_back_edges ())
         {
 	  /* Preheaders are needed for SCEV to work.
-	     Simple lateches and recorded exits improve chances that loop will
+	     Simple latches and recorded exits improve chances that loop will
 	     proved to be finite in testcases such as in loop-15.c and loop-24.c  */
 	  loop_optimizer_init (LOOPS_NORMAL
 			       | LOOPS_HAVE_RECORDED_EXITS);
@@ -916,7 +916,7 @@  generate_summary (void)
 
      We process AVAIL_OVERWRITABLE functions.  We can not use the results
      by default, but the info can be used at LTO with -fwhole-program or
-     when function got clonned and the clone is AVAILABLE.  */
+     when function got cloned and the clone is AVAILABLE.  */
 
   for (node = cgraph_nodes; node; node = node->next)
     if (cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
@@ -1545,7 +1545,7 @@  skip_function_for_local_pure_const (struct cgraph_node *node)
   if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
     {
       if (dump_file)
-        fprintf (dump_file, "Function is not available or overwrittable; not analyzing.\n");
+        fprintf (dump_file, "Function is not available or overwritable; not analyzing.\n");
       return true;
     }
   return false;
diff --git a/gcc/ipa-ref.c b/gcc/ipa-ref.c
index 27c32dc..db70e6e 100644
--- a/gcc/ipa-ref.c
+++ b/gcc/ipa-ref.c
@@ -197,7 +197,7 @@  ipa_dump_refering (FILE * file, struct ipa_ref_list *list)
   fprintf (file, "\n");
 }
 
-/* Clone all references from SRC to DEST_NODE or DEST_VARPOL_NODE.  */
+/* Clone all references from SRC to DEST_NODE or DEST_VARPOOL_NODE.  */
 
 void
 ipa_clone_references (struct cgraph_node *dest_node,
@@ -215,7 +215,7 @@  ipa_clone_references (struct cgraph_node *dest_node,
 			  ref->use, ref->stmt);
 }
 
-/* Clone all refering from SRC to DEST_NODE or DEST_VARPOL_NODE.  */
+/* Clone all refering from SRC to DEST_NODE or DEST_VARPOOL_NODE.  */
 
 void
 ipa_clone_refering (struct cgraph_node *dest_node,
diff --git a/gcc/ipa-reference.c b/gcc/ipa-reference.c
index a2a8517..f874a2e 100644
--- a/gcc/ipa-reference.c
+++ b/gcc/ipa-reference.c
@@ -86,7 +86,7 @@  struct ipa_reference_global_vars_info_d
   bitmap statics_written;
 };
 
-/* Information we save about every function after ipa-reference is completted.  */
+/* Information we save about every function after ipa-reference is completed.  */
 
 struct ipa_reference_optimization_summary_d
 {
@@ -658,7 +658,7 @@  propagate (void)
       read_all = false;
       write_all = false;
 
-      /* When function is overwrittable, we can not assume anything.  */
+      /* When function is overwritable, we can not assume anything.  */
       if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
         read_write_all_from_decl (node, &read_all, &write_all);
 
@@ -691,7 +691,7 @@  propagate (void)
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    fprintf (dump_file, "  Visiting %s/%i\n",
 		      cgraph_node_name (w), w->uid);
-	  /* When function is overwrittable, we can not assume anything.  */
+	  /* When function is overwritable, we can not assume anything.  */
 	  if (cgraph_function_body_availability (w) <= AVAIL_OVERWRITABLE)
 	    read_write_all_from_decl (w, &read_all, &write_all);