[5/5] Adds ipa-initcall-cp pass.

Message ID 6910a44d-900e-b7a9-f881-8ab525d13e3d@theobroma-systems.com
State: New
Series: ipa-initcall-cp

Commit Message

Erick Ochoa May 29, 2020, 5:32 p.m. UTC
This pass is a variant of constant propagation where global
primitive constants with a single write are propagated to multiple
read statements.
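
For illustration, a minimal example of the pattern this pass targets
(hypothetical code, not taken from the patch):

  static int threshold;    /* global primitive with a single write */

  static void
  init (void)              /* initialization function reached from main () */
  {
    threshold = 42;
  }

  static int
  use (void)
  {
    return threshold + 1;  /* read; becomes 42 + 1 */
  }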

ChangeLog:

2020-05-20  Erick Ochoa  <erick.ochoa@theobroma-systems.com>

	* gcc/Makefile.in: Add new pass.
	* gcc/passes.def: Likewise.
	* gcc/tree-pass.h: Likewise.
	* gcc/common.opt: Likewise.
	* gcc/cgraph.h: Add new field to cgraph_node.
	* gcc/cgraph.c: Likewise.
	* gcc/ipa-cp.c: Skip ipa-cp for a function if initcall-cp has
	constants to propagate in that same function.
	* gcc/ipa-initcall-cp.c: New file.
---
  gcc/Makefile.in       |    1 +
  gcc/cgraph.h          |    5 +-
  gcc/common.opt        |    8 +
  gcc/ipa-cp.c          |    2 +-
  gcc/ipa-initcall-cp.c | 1199 +++++++++++++++++++++++++++++++++++++++++
  gcc/passes.def        |    1 +
  gcc/tree-pass.h       |    1 +
  7 files changed, 1215 insertions(+), 2 deletions(-)
  create mode 100644 gcc/ipa-initcall-cp.c

Comments

Richard Biener June 2, 2020, 12:58 p.m. UTC | #1
On Fri, May 29, 2020 at 8:52 PM Erick Ochoa
<erick.ochoa@theobroma-systems.com> wrote:
>
>
>
> This pass is a variant of constant propagation where global
> primitive constants with a single write are propagated to multiple
> read statements.

Just a few small comments while skimming through the code

> ChangeLog:
>
> 2020-05-20  Erick Ochoa <erick.ochoa@theobroma.systems.com>
>
>         gcc/Makefile.in: Adds new pass
>         gcc/passes.def: Same
>         gcc/tree-pass.h: Same
>         gcc/common.opt: Same
>         gcc/cgraph.h: Adds new field to cgraph_node
>         gcc/cgraph.c: Same
>         gcc/ipa-cp.c: May skip ipa-cp for a function
>         if initcall-cp has constants to propagate
>         in that same function
>         gcc/ipa-initcall-cp.c: New file
> ---
>   gcc/Makefile.in       |    1 +
>   gcc/cgraph.h          |    5 +-
>   gcc/common.opt        |    8 +
>   gcc/ipa-cp.c          |    2 +-
>   gcc/ipa-initcall-cp.c | 1199 +++++++++++++++++++++++++++++++++++++++++
>   gcc/passes.def        |    1 +
>   gcc/tree-pass.h       |    1 +
>   7 files changed, 1215 insertions(+), 2 deletions(-)
>   create mode 100644 gcc/ipa-initcall-cp.c
>
> diff --git a/gcc/Makefile.in b/gcc/Makefile.in
> index 543b477ff18..b94fb633d78 100644
> --- a/gcc/Makefile.in
> +++ b/gcc/Makefile.in
> @@ -1401,6 +1401,7 @@ OBJS = \
>         internal-fn.o \
>         ipa-cp.o \
>         ipa-sra.o \
> +       ipa-initcall-cp.o \
>         ipa-devirt.o \
>         ipa-fnsummary.o \
>         ipa-polymorphic-call.o \
> diff --git a/gcc/cgraph.h b/gcc/cgraph.h
> index 5ddeb65269b..532b4671609 100644
> --- a/gcc/cgraph.h
> +++ b/gcc/cgraph.h
> @@ -937,7 +937,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
>         split_part (false), indirect_call_target (false), local (false),
>         versionable (false), can_change_signature (false),
>         redefined_extern_inline (false), tm_may_enter_irr (false),
> -      ipcp_clone (false), m_uid (uid), m_summary_id (-1)
> +      ipcp_clone (false), skip_ipa_cp (false), m_uid (uid), m_summary_id (-1)
>     {}
>
>     /* Remove the node from cgraph and all inline clones inlined into it.
> @@ -1539,6 +1539,9 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
>     unsigned tm_may_enter_irr : 1;
>     /* True if this was a clone created by ipa-cp.  */
>     unsigned ipcp_clone : 1;
> +  /* True if ipa-initcall-cp propagated constants in this function and
> +     therefore ipa-cp needs to be skipped for this cgraph node.  */
> +  unsigned skip_ipa_cp : 1;
>
>   private:
>     /* Unique id of the node.  */
> diff --git a/gcc/common.opt b/gcc/common.opt
> index d33383b523c..b2d8d1b0958 100644
> --- a/gcc/common.opt
> +++ b/gcc/common.opt
> @@ -3408,4 +3408,12 @@ fipa-ra
>   Common Report Var(flag_ipa_ra) Optimization
>   Use caller save register across calls if possible.
>
> +fipa-initcall-cp-dryrun
> +Common Report Var(flag_initcall_cp_dryrun)
> +Run analysis for propagating constants across initialization functions.
> +
> +fipa-initcall-cp
> +Common Report Var(flag_initcall_cp) Optimization
> +Run transformation for propagating constants across initialization functions.
> +
>   ; This comment is to ensure we retain the blank line above.
> diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
> index c64e9104a94..1036cb0684e 100644
> --- a/gcc/ipa-cp.c
> +++ b/gcc/ipa-cp.c
> @@ -1188,7 +1188,7 @@ initialize_node_lattices (struct cgraph_node *node)
>
>     gcc_checking_assert (node->has_gimple_body_p ());
>
> -  if (!ipa_get_param_count (info))
> +  if (!ipa_get_param_count (info) || node->skip_ipa_cp)
>       disable = true;
>     else if (node->local)
>       {
> diff --git a/gcc/ipa-initcall-cp.c b/gcc/ipa-initcall-cp.c
> new file mode 100644
> index 00000000000..02f70b7a908
> --- /dev/null
> +++ b/gcc/ipa-initcall-cp.c
> @@ -0,0 +1,1199 @@
> +/* Initcall constant propagation pass.
> +   Copyright (C) 2019-2020 Free Software Foundation, Inc.
> +
> +   Contributed by Erick Ochoa <erick.ochoa@theobroma-systems.com>
> +
> +This file is part of GCC.
> +
> +GCC is free software; you can redistribute it and/or modify it under
> +the terms of the GNU General Public License as published by the Free
> +Software Foundation; either version 3, or (at your option) any later
> +version.
> +
> +GCC is distributed in the hope that it will be useful, but WITHOUT ANY
> +WARRANTY; without even the implied warranty of MERCHANTABILITY or
> +FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
> +for more details.
> +
> +You should have received a copy of the GNU General Public License
> +along with GCC; see the file COPYING3.  If not see
> +<http://www.gnu.org/licenses/>.  */
> +
> +#include "config.h"
> +#include "system.h"
> +#include "coretypes.h"
> +#include "backend.h"
> +#include "tree.h"
> +#include "tree-eh.h"
> +#include "gimple.h"
> +#include "gimple-expr.h"
> +#include "gimple-iterator.h"
> +#include "predict.h"
> +#include "alloc-pool.h"
> +#include "tree-pass.h"
> +#include "cgraph.h"
> +#include "diagnostic.h"
> +#include "fold-const.h"
> +#include "gimple-fold.h"
> +#include "symbol-summary.h"
> +#include "tree-vrp.h"
> +#include "ipa-prop.h"
> +#include "tree-pretty-print.h"
> +#include "tree-inline.h"
> +#include "ipa-fnsummary.h"
> +#include "ipa-utils.h"
> +#include "tree-ssa-ccp.h"
> +#include "stringpool.h"
> +#include "attribs.h"
> +#include "gimple-pretty-print.h"
> +#include "ssa.h"
> +
> +#define INITCALL_CP_SUFFIX "initcall.cp"
> +
> +typedef vec<struct ipa_ref *> ipa_ref_vec;
> +
> +/* log to dump file */
> +static inline void
> +log (const char *const fmt, ...)
> +{
> +  if (!dump_file)
> +    return;
> +
> +  va_list args;
> +  va_start (args, fmt);
> +  vfprintf (dump_file, fmt, args);
> +  va_end (args);
> +}
> +
> +bool
> +reach_nodes_via_bb1 (cgraph_node *n2);
> +static bool
> +bb1_dominates_bb2 (basic_block, basic_block, cgraph_node *);
> +static bool
> +write_before_call (struct ipa_ref *, struct ipa_ref *);
> +static bool
> +call_before_read (struct ipa_ref *, struct ipa_ref *);
> +static hash_map<const char *, unsigned> *clone_num_suffixes1;
> +static hash_map<cgraph_node *, cgraph_node *> *func_to_clone;
> +static vec<struct cgraph_node *>
> +get_nondominated_callees (cgraph_node *caller, cgraph_node *callee,
> +                         bool *exitEarly = NULL);
> +
> +static void
> +load_function_body_of_ipa_ref (cgraph_node *node)
> +{
> +  gcc_assert (in_lto_p);
> +  cgraph_node *f_cnode2 = node->ultimate_alias_target ();
> +  const char *name = f_cnode2->dump_asm_name ();
> +  log ("%s: for function '%s'\n", __func__, name);
> +  if (dump_file)
> +    {
> +      dump_node (f_cnode2->decl, TDF_DETAILS, dump_file);
> +    }
> +  f_cnode2->get_untransformed_body ();
> +}
> +
> +static void
> +load_function_body_of_ipa_ref (struct ipa_ref *ref)
> +{
> +  /* IPA passes don't get the function bodies during LTO.  */
> +  gcc_assert (in_lto_p);
> +
> +  symtab_node *f_node = ref->referring;
> +  cgraph_node *f_cnode = dyn_cast<cgraph_node *> (f_node);
> +  load_function_body_of_ipa_ref (f_cnode);
> +}
> +
> +static void
> +dump_vnode_ipa_ref (ipa_ref *ref)
> +{
> +  if (!dump_file)
> +    return;
> +  log ("Reference type: %s\n", stringify_ipa_ref_use (ref->use));
> +
> +  symtab_node *f_node = ref->referring;
> +  log ("Reference function: %s\n", f_node->dump_asm_name ());
> +
> +  log ("Gimple stmt (@0x%lx, lto_stmt_uid: %u):\n", (long) ref->stmt,
> +       ref->lto_stmt_uid);
> +  load_function_body_of_ipa_ref (ref);
> +  print_gimple_stmt (dump_file, ref->stmt, 0, TDF_NONE);
> +}
> +
> +/* Return true if all operands of the assignment are constants.  */
> +static bool
> +gimple_assign_is_single_const (gimple *stmt)
> +{
> +  tree op;
> +
> +  gcc_assert (gimple_code (stmt) == GIMPLE_ASSIGN);
> +
> +  if (gimple_has_volatile_ops (stmt))
> +    {
> +      log ("has volatile ops!\n");
> +      return false;
> +    }
> +
> +  if (gimple_num_ops (stmt) != 2)
> +    {
> +      log ("wrong num of ops: %u!\n", gimple_num_ops (stmt));
> +      return false;
> +    }

So you probably want to check gimple_assign_single_p () which
verifies you are dealing with a plain assignment.

> +  op = gimple_op (stmt, 1);

gimple_assign_rhs1 is the type-safe way to access the RHS.
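
A sketch of that shape (using CONSTANT_CLASS_P here as the constant
predicate; substitute tree_code_is_cst if that is the intended check):

  if (!gimple_assign_single_p (stmt)
      || gimple_has_volatile_ops (stmt))
    return false;
  tree rhs = gimple_assign_rhs1 (stmt);
  return CONSTANT_CLASS_P (rhs);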

> +  if (!tree_code_is_cst (op))
> +    {
> +      log ("op is not cst!\n");
> +      return false;
> +    }
> +
> +  return true;
> +}
> +
> +/* Collect callees whose call statements are not dominated by the call
> +   to CALLEE.  */
> +// assumptions:
> +//  * there is a callsite from caller to callee
> +static vec<struct cgraph_node *>
> +get_nondominated_callees (cgraph_node *caller, cgraph_node *callee,
> +                         bool *exitEarly)
> +{
> +  gcc_assert (in_lto_p);
> +  caller->get_untransformed_body ();
> +
> +  function *func = DECL_STRUCT_FUNCTION (caller->decl);
> +  basic_block bb;
> +  bool found = false;
> +  FOR_EACH_BB_FN (bb, func)
> +    {
> +      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
> +          gsi_next (&gsi))
> +       {
> +         gimple *stmt = gsi_stmt (gsi);
> +         if (gimple_code (stmt) != GIMPLE_CALL)
> +           continue;
> +
> +         if (dump_file)
> +           print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
> +         tree rhs = gimple_op (stmt, 1);

gimple_op (stmt, 1) is the function called (gimple_call_fn () btw)

> +         if (TREE_CODE (rhs) == RECORD_TYPE)

So this will never be true.

> +           {
> +             gcc_assert (exitEarly);
> +             *exitEarly = true;
> +             return vNULL;
> +           }
> +         tree callee_decl = gimple_call_fndecl (stmt);
> +         if (!callee_decl)
> +           {
> +             gcc_assert (exitEarly);
> +             *exitEarly = true;
> +             return vNULL;
> +           }
> +         cgraph_node *callee_node = cgraph_node::get (callee_decl);
> +         if (callee_node != callee)

The function is named _nondominated but I see no dominance
checks at all.  FOR_EACH_BB_FN does not in any way iterate
over a specific order of basic-blocks.

> +           continue;
> +
> +         found = true;
> +       }
> +      if (found)
> +       break;
> +    }
> +
> +  gcc_assert (found);
> +
> +  // At this point in the program, we hold a valid bb.
> +  // The callee is located in the bb

OK, so with cgraph-edges recorded you could have walked
cgraph edges, finding one with callee and use edge->call_stmt
to find the block.  There may be more than one call to callee?
Eventually the caller of this function already knows the callgraph edge...
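
Roughly like this (a sketch only; as said, there may be more than one
edge to the same callee, so all matching edges would need handling):

  for (cgraph_edge *e = caller->callees; e; e = e->next_callee)
    if (e->callee->function_symbol () == callee)
      {
        basic_block call_bb = gimple_bb (e->call_stmt);
        /* ... use call_bb directly instead of re-scanning every
           statement of the caller ...  */
      }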

> +  vec<struct cgraph_node *> callees = vNULL;
> +  basic_block bb_c;
> +  FOR_EACH_BB_FN (bb_c, func)
> +    {
> +      bool self_dominates = bb == bb_c;
> +      bool bb_dominates_bbc = bb1_dominates_bb2 (bb, bb_c, caller);
> +      if (bb_dominates_bbc && !self_dominates)
> +       continue;
> +
> +      for (gimple_stmt_iterator gsi = gsi_start_bb (bb_c); !gsi_end_p (gsi);
> +          gsi_next (&gsi))
> +       {
> +         gimple *stmt = gsi_stmt (gsi);
> +         if (gimple_code (stmt) != GIMPLE_CALL)
> +           continue;
> +
> +         tree callee_decl = gimple_call_fndecl (stmt);
> +         cgraph_node *callee_node = cgraph_node::get (callee_decl);
> +
> +         if (fndecl_built_in_p (callee_node->decl))
> +           continue;
> +
> +         if (self_dominates && callee_node == callee)
> +           {
> +             break;
> +           }
> +
> +         callees.safe_push (callee_node);
> +       }
> +    }
> +  return callees;
> +}
> +
> +/* Returns true if bb1 dominates bb2
> + * Includes self-dominance */
> +static bool
> +bb1_dominates_bb2 (basic_block bb1, basic_block bb2, cgraph_node *cnode)
> +{
> +  // self dominance
> +  if (bb1 == bb2)
> +    return true;
> +
> +  push_cfun (DECL_STRUCT_FUNCTION (cnode->decl));
> +
> +  /* If dominator info is not available, we need to calculate it.  */
> +  if (!dom_info_available_p (CDI_DOMINATORS))
> +    calculate_dominance_info (CDI_DOMINATORS);
> +
> +  /* Check if the read is dominated by the write.  */
> +  bool ret = dominated_by_p (CDI_DOMINATORS, bb2, bb1);
> +
> +  /* Restore cfun.  */
> +  pop_cfun ();

push/pop_cfun is _very_ expensive.  Dominance query does not
need it set but computation looks at cfun IIRC.  Adjust those to
explicitly passed struct function instead.
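
That is, something along these lines in the callers (a sketch: set cfun
and compute dominators once per function, instead of once per basic-block
query as bb1_dominates_bb2 does now):

  push_cfun (DECL_STRUCT_FUNCTION (caller->decl));
  if (!dom_info_available_p (CDI_DOMINATORS))
    calculate_dominance_info (CDI_DOMINATORS);
  pop_cfun ();

  /* The queries themselves only read the stored dominance info.  */
  basic_block bb_c;
  FOR_EACH_BB_FN (bb_c, func)
    {
      bool dominated = (bb == bb_c
                        || dominated_by_p (CDI_DOMINATORS, bb_c, bb));
      /* ...  */
    }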

> +
> +  return ret;
> +}
> +
> +/* Return true if write occurs before read.
> + * write and read must be located in the same function
> + * */
> +static bool
> +write_before_read_in_function (struct ipa_ref *write, struct ipa_ref *read)
> +{
> +  basic_block w_bb = gimple_bb (write->stmt);
> +  basic_block r_bb = gimple_bb (read->stmt);
> +
> +  if (w_bb != r_bb)
> +    {
> +      /*
> +       * The dominator framework operates on cfun.
> +       * Therefore we need to set cfun accordingly.
> +       */
> +      symtab_node *w_node = write->referring;
> +      cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
> +      return bb1_dominates_bb2 (w_bb, r_bb, w_cnode);
> +    }
> +
> +  gimple_stmt_iterator gsi;
> +  for (gsi = gsi_start_bb (w_bb); !gsi_end_p (gsi); gsi_next (&gsi))
> +    {
> +      if (gsi_stmt (gsi) == write->stmt)
> +       return true;
> +      if (gsi_stmt (gsi) == read->stmt)
> +       return false;
> +    }
> +
> +  /* Neither write nor read found in BB.  */
> +  gcc_unreachable ();
> +  return false;
> +}
> +
> +/*
> + * DFS over callees and return if callee is a or b.
> + */
> +static cgraph_node *
> +find_cgraph_in_callee_set (cgraph_node *n, hash_set<cgraph_node *> set,
> +                          cgraph_node *a, cgraph_node *b)
> +{
> +  cgraph_edge *cs;
> +  for (cs = n->callees; cs; cs = cs->next_callee)
> +    {
> +      cgraph_node *callee = cs->callee->function_symbol ();
> +      const char *const name = n->dump_asm_name ();
> +      const char *const cname = callee->dump_asm_name ();
> +      log ("Child of %s: %s\n", name, cname);
> +      if (callee == a)
> +       return a;
> +      if (callee == b)
> +       return b;
> +      if (!set.contains (callee))
> +       continue;
> +      return find_cgraph_in_callee_set (callee, set, a, b);
> +    }
> +  return NULL;
> +}
> +
> +/* Walks back along caller relations until main is found.  */
> +static cgraph_node *
> +get_ancestor_main_dfs (hash_set<cgraph_node *> *visited,
> +                      vec<cgraph_node *> *path, cgraph_node *node)
> +{
> +  if (MAIN_NAME_P (DECL_NAME (node->decl)))
> +    {
> +      path->safe_push (node);
> +      return node;
> +    }
> +
> +  visited->add (node);
> +
> +  cgraph_edge *cs;
> +  for (cs = node->callers; cs; cs = cs->next_caller)
> +    {
> +      cgraph_node *caller = cs->caller->function_symbol ();
> +      if (visited->contains (caller))
> +       continue;
> +
> +      cgraph_node *main = get_ancestor_main_dfs (visited, path, caller);
> +
> +      if (!main)
> +       continue;
> +
> +      path->safe_push (node);
> +      return main;
> +    }
> +  return NULL;
> +}
> +
> +/* Returns path from main to cgraph_node in the vector */
> +static cgraph_node *
> +get_path_from_main_to (cgraph_node *node, vec<cgraph_node *> *path)
> +{
> +  hash_set<cgraph_node *> visited;
> +  cgraph_node *main = get_ancestor_main_dfs (&visited, path, node);
> +  visited.empty ();
> +  return main;
> +}
> +
> +/*
> + * Verifying that a stmt s1 is dominated by a stmt s2
> + * across function borders is not trivial with the available
> + * infrastructure (dominator algorithm on bb level plus cgraph).
> + * If we rule out external calls/callbacks, then we still need
> + * to guarantee a write on each possible path to the read.
> + *
> + * The implemented solution to this problem, which is of course
> + * too strict,
> + * but also not too compute/memory intensive is as follows:
> + *
> + * - Check if write is reachable by main () by only looking into
> + *   the first bb in each function on the path.
> + * - All call stmts between main () and write must not possibly
> + *   reach a read.  We consider indirect call statements as
> + *   possible reaching read (unless we can prove opposite).
> + *
> + * The external calls/callbacks are ruled out as follows:
> + *
> + * - all possible ancestors of read must not be external visible
> + * - all possible ancestors of read must not be function pointers
> + * - Something else?
> + */
> +static bool
> +write_before_read_across_function_borders (struct ipa_ref *write,
> +                                          struct ipa_ref *read)
> +{
> +  symtab_node *w_node = write->referring;
> +  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
> +  symtab_node *r_node = read->referring;
> +  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
> +  cgraph_node *main;
> +
> +  /* Get main () function.  */
> +  vec<cgraph_node *> pw = vNULL;
> +  main = get_path_from_main_to (w_cnode, &pw);
> +  if (!main)
> +    {
> +      log ("main () is not ancestor of write -> skipping.\n");
> +      return false;
> +    }
> +
> +  vec<cgraph_node *> pr = vNULL;
> +  cgraph_node *main_to_read = get_path_from_main_to (r_cnode, &pr);
> +  if (!main_to_read)
> +    {
> +      log ("main () is not ancestor of read -> skipping.\n");
> +      return false;
> +    }
> +
> +  // Future work will involve looking at unconditional paths.
> +  if (!reach_nodes_via_bb1 (w_cnode))
> +    return false;
> +
> +  int i;
> +  cgraph_node *node_in_pr;
> +  FOR_EACH_VEC_ELT (pr, i, node_in_pr)
> +    {
> +      // I think main has to be externally visible.
> +      if (node_in_pr == main)
> +       continue;
> +
> +      /* Assure that all paths to read are not externally visible.  */
> +      if (cgraph_externally_visible_p (node_in_pr, flag_whole_program))
> +       {
> +         const char *const name = node_in_pr->dump_asm_name ();
> +         log ("%s is externally visible...  skipping\n", name);
> +         return false;
> +       }
> +
> +      /* Assure that all paths to read are not
> +       * used as function pointers.  */
> +      if (node_in_pr->address_taken)
> +       {
> +         const char *const name = node_in_pr->dump_asm_name ();
> +         log ("%s might be function pointer...  skipping\n", name);
> +         return false;
> +       }
> +    }
> +
> +  cgraph_node *caller = main;
> +  cgraph_node *node_in_pw;
> +  FOR_EACH_VEC_ELT (pw, i, node_in_pw)
> +    {
> +      gcc_assert (w_cnode != r_cnode);
> +      if (node_in_pw == r_cnode && path_exists (r_cnode, w_cnode))
> +       {
> +         return call_before_read (write, read);
> +       }
> +
> +      if (node_in_pw == w_cnode && path_exists (w_cnode, r_cnode))
> +       {
> +         return write_before_call (write, read);
> +       }
> +
> +      if (node_in_pw == main)
> +       {
> +         continue;
> +       }
> +
> +      const char *const cname = caller->dump_asm_name ();
> +      const char *const nname = node_in_pw->dump_asm_name ();
> +      log ("get_nondominated_callees from %s to %s\n", cname, nname);
> +
> +      bool exitEarly = false;
> +      vec<cgraph_node *> non_dominated_callees
> +       = get_nondominated_callees (caller, node_in_pw, &exitEarly);
> +      if (exitEarly)
> +       return false;
> +      cgraph_node *non_dominated_callee;
> +      int j;
> +      FOR_EACH_VEC_ELT (non_dominated_callees, j, non_dominated_callee)
> +       {
> +         const char *const ndname = non_dominated_callee->dump_asm_name ();
> +         log ("callee %d %s\n", j, ndname);
> +         if (!path_exists (non_dominated_callee, r_cnode))
> +           continue;
> +
> +         return false;
> +       }
> +
> +      caller = node_in_pw;
> +    }
> +  return true;
> +}
> +
> +/* Return callees accessible through bb1 */
> +static vec<struct cgraph_node *>
> +get_bb1_callees (cgraph_node *c, cgraph_node *w_cnode)
> +{
> +  vec<struct cgraph_node *> callees = vNULL;
> +  if (fndecl_built_in_p (c->decl))
> +    return vNULL;
> +
> +  const char *cname = c->dump_asm_name ();
> +  log ("before get_untransformed_body %s\n", cname);
> +
> +  if (!c->definition)
> +    return vNULL;
> +
> +  push_cfun (DECL_STRUCT_FUNCTION (c->decl));
> +
> +  gcc_assert (in_lto_p);
> +  c->get_untransformed_body ();
> +
> +  pop_cfun ();
> +
> +  function *func = DECL_STRUCT_FUNCTION (c->decl);
> +  /* Get first BB (after the fake entry BB).  */
> +  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (func)->next_bb;
> +
> +  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
> +       gsi_next (&gsi))
> +    {
> +      gimple *stmt = gsi_stmt (gsi);
> +
> +      if (gimple_code (stmt) != GIMPLE_CALL)
> +       continue;
> +
> +      tree callee_decl = gimple_call_fndecl (stmt);
> +      cgraph_node *callee_node = cgraph_node::get (callee_decl);
> +      if (!path_exists (callee_node, w_cnode))
> +       continue;
> +
> +      callees.safe_push (callee_node);
> +    }
> +
> +  return callees;
> +}
> +
> +/* Return true if n2 is reachable from n1 by visiting only first basic blocks */
> +static bool
> +reach_nodes_via_bb1_dfs (hash_set<cgraph_node *> *visited, cgraph_node *n1,
> +                        cgraph_node *n2)
> +{
> +  if (n1 == n2)
> +    return true;
> +
> +  visited->add (n1);
> +
> +  vec<struct cgraph_node *> callees = get_bb1_callees (n1, n2);
> +
> +  int i;
> +  cgraph_node *callee;
> +  FOR_EACH_VEC_ELT (callees, i, callee)
> +    {
> +      if (!visited->contains (callee))
> +       {
> +         bool found = reach_nodes_via_bb1_dfs (visited, callee, n2);
> +         if (found)
> +           {
> +             callees.release ();
> +             return true;
> +           }
> +       }
> +    }
> +
> +  return false;
> +}
> +
> +/* Returns true if n2 is reachable from main function via following call graph
> + * nodes reachable within first basic blocks */
> +bool
> +reach_nodes_via_bb1 (cgraph_node *n2)
> +{
> +  vec<cgraph_node *> pr = vNULL;
> +  cgraph_node *main = get_path_from_main_to (n2, &pr);
> +  hash_set<cgraph_node *> visited;
> +  bool path_exists = reach_nodes_via_bb1_dfs (&visited, main, n2);
> +  visited.empty ();
> +  pr.release ();
> +  return path_exists;
> +}
> +
> +/* Determine if write occurs before a function call which contains read.  */
> +static bool
> +write_before_call (struct ipa_ref *write, struct ipa_ref *read)
> +{
> +  symtab_node *w_node = write->referring;
> +  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
> +  symtab_node *r_node = read->referring;
> +  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
> +
> +  gcc_assert (path_exists (w_cnode, r_cnode));
> +  gcc_assert (w_cnode != r_cnode);
> +
> +  basic_block w_bb = gimple_bb (write->stmt);
> +  basic_block r_bb = gimple_bb (read->stmt);
> +
> +  gcc_assert (in_lto_p);
> +  w_cnode->get_untransformed_body ();
> +
> +  function *func = DECL_STRUCT_FUNCTION (w_cnode->decl);
> +  basic_block c_bb;
> +  vec<struct cgraph_node *> callees = vNULL;
> +  FOR_EACH_BB_FN (c_bb, func)
> +    {
> +      bool self_dominates = w_bb == c_bb;
> +      bool w_bb_dominates_c_bb = bb1_dominates_bb2 (w_bb, c_bb, w_cnode);
> +      if (w_bb_dominates_c_bb && !self_dominates)
> +       continue;
> +
> +      for (gimple_stmt_iterator gsi = gsi_start_bb (c_bb); !gsi_end_p (gsi);
> +          gsi_next (&gsi))
> +       {
> +         gimple *stmt = gsi_stmt (gsi);
> +
> +         if (stmt == write->stmt)
> +           {
> +             break;
> +           }
> +
> +         if (gimple_code (stmt) != GIMPLE_CALL)
> +           {
> +             continue;
> +           }
> +         if (dump_file)
> +           print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
> +         tree rhs = gimple_op (stmt, 1);
> +
> +         if (rhs && TREE_CODE (rhs) == POINTER_TYPE)
> +           return false;
> +
> +         tree callee_decl = gimple_call_fndecl (stmt);
> +         if (!callee_decl)
> +           return false;
> +
> +         cgraph_node *callee_node = cgraph_node::get (callee_decl);
> +         const char *identifier
> +           = IDENTIFIER_POINTER (DECL_NAME (callee_node->decl));
> +         const char *sigsetjmp = "sigsetjmp";
> +         if (strstr (identifier, sigsetjmp) != NULL)
> +           return false;
> +
> +         if (fndecl_built_in_p (callee_node->decl))
> +           continue;
> +
> +         log ("found callee %s\n", callee_node->dump_asm_name ());
> +         callees.safe_push (callee_node);
> +       }
> +    }
> +  cgraph_node *callee;
> +  int i;
> +  FOR_EACH_VEC_ELT (callees, i, callee)
> +    {
> +      if (path_exists (callee, r_cnode))
> +       {
> +         return false;
> +       }
> +    }
> +
> +  return true;
> +}
> +
> +/* Determine if call which contains write occurs before a read.  */
> +static bool
> +call_before_read (struct ipa_ref *write, struct ipa_ref *read)
> +{
> +  symtab_node *w_node = write->referring;
> +  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
> +  symtab_node *r_node = read->referring;
> +  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
> +
> +  gcc_assert (path_exists (r_cnode, w_cnode));
> +  gcc_assert (w_cnode != r_cnode);
> +
> +  basic_block w_bb = gimple_bb (write->stmt);
> +  basic_block r_bb = gimple_bb (read->stmt);
> +
> +  gcc_assert (in_lto_p);
> +  r_cnode->get_untransformed_body ();
> +
> +  function *func = DECL_STRUCT_FUNCTION (r_cnode->decl);
> +  basic_block c_bb;
> +  FOR_EACH_BB_FN (c_bb, func)
> +    {
> +      bool self_dominates = c_bb == r_bb;
> +      bool call_dominates_read = bb1_dominates_bb2 (c_bb, r_bb, r_cnode);
> +      if (!call_dominates_read && !self_dominates)
> +       continue;
> +
> +      for (gimple_stmt_iterator gsi = gsi_start_bb (c_bb); !gsi_end_p (gsi);
> +          gsi_next (&gsi))
> +       {
> +         gimple *stmt = gsi_stmt (gsi);
> +
> +         // self dominance
> +         if (stmt == read->stmt)
> +           break;
> +
> +         if (gimple_code (stmt) != GIMPLE_CALL)
> +           continue;
> +
> +         if (dump_file)
> +           print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
> +         tree rhs = gimple_op (stmt, 1);
> +         if (TREE_CODE (rhs) == POINTER_TYPE)
> +           return false;
> +         tree callee_decl = gimple_call_fndecl (stmt);
> +
> +         cgraph_node *callee_node = cgraph_node::get (callee_decl);
> +         const char *identifier
> +           = IDENTIFIER_POINTER (DECL_NAME (callee_node->decl));
> +         const char *sigsetjmp = "sigsetjmp";
> +         if (strstr (identifier, sigsetjmp) != NULL)
> +           return false;
> +
> +         if (path_exists (callee_node, w_cnode))
> +           return true;
> +       }
> +    }
> +  return false;
> +}
> +
> +/* Determine if write occurs before a read.  */
> +static bool
> +write_before_read (struct ipa_ref *write, struct ipa_ref *read)
> +{
> +  symtab_node *w_node = write->referring;
> +  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
> +  symtab_node *r_node = read->referring;
> +  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
> +
> +  if (w_cnode == r_cnode)
> +    /* Within the same function.  */
> +    return write_before_read_in_function (write, read);
> +
> +  /* Not within the same function.  */
> +  return write_before_read_across_function_borders (write, read);
> +}
> +
> +/* Determine if write occurs before all reads.  */
> +static bool
> +write_before_all_reads (struct ipa_ref *write, const ipa_ref_vec &reads)
> +{
> +  int i;
> +  struct ipa_ref *read;
> +
> +  FOR_EACH_VEC_ELT (reads, i, read)
> +    {
> +      load_function_body_of_ipa_ref (read);
> +      if (!write_before_read (write, read))
> +       return false;
> +    }
> +  return true;
> +}
> +
> +static void
> +propagate_constant_to_read (tree write_val, struct ipa_ref *ref,
> +                           hash_set<cgraph_node *> *func)
> +{
> +  gcc_assert (ref->use == IPA_REF_LOAD);
> +  load_function_body_of_ipa_ref (ref);
> +
> +  gimple *read_stmt = ref->stmt;
> +
> +  gcc_assert (gimple_code (read_stmt) == GIMPLE_ASSIGN);
> +  gcc_assert (gimple_num_ops (read_stmt) == 2);
> +
> +  tree old_lhs = gimple_op (read_stmt, 0);
> +  symtab_node *r_node = ref->referring;
> +  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
> +
> +  cgraph_node **possible_clone = func_to_clone->get (r_cnode);
> +  if (!possible_clone)
> +    {
> +      static const char *const suffix = INITCALL_CP_SUFFIX;
> +      // callers has to be vNULL, otherwise, we will be
> +      // analyzing clones...
> +      // and we don't want that...
> +      // but that means that we will need to update the callers
> +      // later...  in update_callgraph
> +      cgraph_node *clone
> +       = r_cnode->create_version_clone_with_body (vNULL, NULL, NULL, NULL,
> +                                                  NULL, suffix, NULL);
> +      clone->get_untransformed_body ();
> +      func_to_clone->put (r_cnode, clone);
> +    }
> +
> +  possible_clone = func_to_clone->get (r_cnode);
> +  cgraph_node *clone = *possible_clone;
> +
> +  // Build new stmt and replace old.
> +  gimple_stmt_iterator gsi;
> +  basic_block bb;
> +  // Let's create a clone with body here...
> +  // The clone should not have callers as
> +  // to not interfere with the current
> +  // analysis.
> +  // The callers will be set at the end...
> +
> +  push_cfun (DECL_STRUCT_FUNCTION (clone->decl));
> +  function *clone_func = DECL_STRUCT_FUNCTION (clone->decl);
> +  bool found = false;
> +  FOR_EACH_BB_FN (bb, clone_func)
> +    {
> +      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> +       {
> +         gimple *stmt = gsi_stmt (gsi);
> +         if (gimple_code (stmt) != GIMPLE_ASSIGN)
> +           continue;
> +         tree old_val_clone = gimple_op (stmt, 1);
> +         tree lhs = gimple_op (stmt, 0);
> +
> +         if (TREE_CODE (old_val_clone) != VAR_DECL)
> +           continue;
> +
> +         tree old_val = gimple_op (read_stmt, 1);
> +         if (IDENTIFIER_POINTER (DECL_NAME (old_val_clone))
> +             != IDENTIFIER_POINTER (DECL_NAME (old_val)))
> +           continue;
> +
> +         found = true;
> +
> +         gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
> +         tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
> +         gimple *new_read_stmt = gimple_build_assign (new_lhs, write_val);
> +         gsi_insert_before (&gsi2, new_read_stmt, GSI_SAME_STMT);
> +
> +         gimple *use_stmt;
> +         imm_use_iterator use_iter;
> +         FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
> +           {
> +             use_operand_p use_p;
> +             FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
> +               SET_USE (use_p, new_lhs);
> +             update_stmt (use_stmt);
> +           }
> +       }
> +
> +      if (found)
> +       break;
> +    }
> +  gcc_assert (found);
> +  pop_cfun ();
> +}
> +
> +static bool
> +ipa_initcall_get_writes_and_reads (varpool_node *vnode, ipa_ref_vec *writes,
> +                                  ipa_ref_vec *reads)
> +{
> +  int i;
> +  struct ipa_ref *ref;
> +
> +  log ("%s for variable '%s'.\n", __func__, vnode->name ());
> +
> +  /* Only IPA_REF_STORE and IPA_REF_LOAD left.  */
> +  for (i = 0; vnode->iterate_referring (i, ref); i++)
> +    {
> +      symtab_node *f_node = ref->referring;
> +      cgraph_node *f_cnode = dyn_cast<cgraph_node *> (f_node);
> +
> +      if (!f_cnode)
> +       {
> +         log ("skipping variable %s due to static initialization\n",
> +              vnode->name ());
> +         return false;
> +       }
> +      // it is possible that f_cnode is NULL if the dyn_cast fails.
> +      // If the dyn_cast fails, this is an example of static initialization.
> +      const char *identifier = IDENTIFIER_POINTER (DECL_NAME (f_cnode->decl));
> +      static const char *const suffix = INITCALL_CP_SUFFIX;
> +      if (strstr (identifier, suffix) != NULL)
> +       continue;
> +
> +      if (ref->use == IPA_REF_STORE)
> +       writes->safe_push (ref);
> +
> +      if (ref->use == IPA_REF_LOAD)
> +       reads->safe_push (ref);
> +    }
> +  return true;
> +}
> +
> +static void
> +propagate_constant_to_reads (tree write_val, const ipa_ref_vec &reads_original,
> +                            hash_set<cgraph_node *> *funcs)
> +{
> +  /* Iterate over reads and replace them by constant.  */
> +  struct ipa_ref *ref;
> +  int i;
> +  FOR_EACH_VEC_ELT (reads_original, i, ref)
> +    {
> +      propagate_constant_to_read (write_val, ref, funcs);
> +    }
> +}
> +
> +/*
> + * Extracts the assigned constant, iff the given statement
> + * is a constant assignment.  Returns NULL_TREE otherwise.
> + */
> +static tree
> +extract_constant_from_initcall_write (struct ipa_ref *write)
> +{
> +  symtab_node *f_node = write->referring;
> +  cgraph_node *f_cnode = dyn_cast<cgraph_node *> (f_node);
> +
> +  tree decl = f_cnode->decl;
> +  if (TREE_CODE (decl) != FUNCTION_DECL)
> +    {
> +      log ("Not function decl -> skipping.\n");
> +      return NULL_TREE;
> +    }
> +
> +  if (!f_cnode->has_gimple_body_p ())
> +    log ("Does NOT have gimple body!\n");
> +  if (f_cnode->inlined_to)
> +    log ("Inlined To\n");
> +
> +  log ("%s: for writer gimple:\n", __func__);
> +  dump_vnode_ipa_ref (write);
> +
> +  /* Get the write function's body.  */
> +  load_function_body_of_ipa_ref (write);
> +
> +  gimple *stmt = write->stmt;
> +
> +  /* Verify that we have an assignment.  */
> +  if (gimple_code (stmt) != GIMPLE_ASSIGN)
> +    {
> +      log ("Write stmt not assignment -> skipping.\n");
> +      return NULL_TREE;
> +    }
> +
> +  /* Check if write's LHS (vnode) is not volatile.  */
> +  tree lhs = gimple_assign_lhs (stmt);
> +  if (TREE_THIS_VOLATILE (TREE_TYPE (lhs)))
> +    {
> +      log ("Variable volatile -> skipping.\n");
> +      return NULL_TREE;
> +    }
> +
> +  /* Check if RHS of write stmt is constant.  */
> +  if (!gimple_assign_is_single_const (stmt))
> +    {
> +      log ("Found non-const operands.\n");
> +      return NULL_TREE;
> +    }
> +
> +  /* Extract the constant.  */
> +  tree write_val = gimple_op (stmt, 1);
> +
> +  if (dump_file)
> +    {
> +      log ("Const op:\n");
> +      dump_node (write_val, TDF_DETAILS, dump_file);
> +    }
> +
> +  return write_val;
> +}
> +
> +static void
> +ipa_initcall_cp_execute_for_var (varpool_node *vnode,
> +                                hash_set<cgraph_node *> *update_functions)
> +{
> +  ipa_ref_vec writes = vNULL;
> +  ipa_ref_vec reads = vNULL;
> +  struct ipa_ref *write;
> +  tree write_val;
> +  gimple_stmt_iterator gsi;
> +  bool remove_permanently;
> +
> +  log ("%s for variable '%s'.\n", __func__, vnode->name ());
> +
> +  if (dump_file)
> +    {
> +      dump_node (vnode->decl, TDF_DETAILS, dump_file);
> +    }
> +
> +  /* Variable must not be externally visible.  */
> +  if (vnode->externally_visible_p ())
> +    {
> +      log ("\texternally visible -> skipping\n");
> +      return;
> +    }
> +
> +  /* All refs must be explicit.  */
> +  if (!vnode->all_refs_explicit_p ())
> +    {
> +      log ("Not explicit variable refs -> skipping.\n");
> +      return;
> +    }
> +
> +  /* Check if any ref->use is IPA_REF_ALIAS.  */
> +  if (vnode->has_aliases_p ())
> +    {
> +      log ("Found IPA_REF_ALIAS -> skipping.\n");
> +      return;
> +    }
> +
> +  /* Check if any ref->use is IPA_REF_ADDR.  */
> +  if (vnode->alias || vnode->address_matters_p ())
> +    {
> +      log ("Found IPA_REF_ADDR -> skipping.\n");
> +      return;
> +    }
> +
> +  /* We don't touch arrays.  */
> +  if (TREE_CODE (TREE_TYPE (vnode->decl)) == ARRAY_TYPE)
> +    {
> +      log ("Variable is array -> skipping.\n");
> +      return;
> +    }
> +
> +  /* We don't touch structs.  */
> +  if (TREE_CODE (TREE_TYPE (vnode->decl)) == RECORD_TYPE)
> +    {
> +      log ("Variable is struct -> skipping.\n");
> +      return;
> +    }
> +
> +  /* We don't touch unions.  */
> +  if (TREE_CODE (TREE_TYPE (vnode->decl)) == UNION_TYPE)
> +    {
> +      log ("Variable is union -> skipping.\n");
> +      return;
> +    }
> +
> +  /* Get all refs (must be REF_STORE or REF_LOAD).  */
> +  bool continue_work
> +    = ipa_initcall_get_writes_and_reads (vnode, &writes, &reads);
> +  if (!continue_work)
> +    {
> +      log ("Static initialization -> skipping.\n");
> +      writes.release ();
> +      reads.release ();
> +      return;
> +    }
> +
> +  if (writes.length () > 1)
> +    {
> +      /* More than one writer.  */
> +      log ("More than one writer -> skipping.\n");

IPA analysis already computes whether all accesses to a variable
happen in one function, I suppose computing whether all
writes happen in one function would be easy as well.
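
Until then, a cheap local check over the refs could look like this
(hypothetical helper, using the same iterate_referring idiom as the
patch):

  static bool
  writes_confined_to_one_function_p (varpool_node *vnode)
  {
    cgraph_node *writer = NULL;
    struct ipa_ref *ref;
    for (int i = 0; vnode->iterate_referring (i, ref); i++)
      if (ref->use == IPA_REF_STORE)
        {
          cgraph_node *fn = dyn_cast<cgraph_node *> (ref->referring);
          if (!fn || (writer && writer != fn))
            return false;
          writer = fn;
        }
    return writer != NULL;
  }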

> +      writes.release ();
> +      reads.release ();
> +      return;
> +    }
> +  else if (writes.length () < 1)
> +    {
> +      /* No writer.  */
> +      log ("No writer -> skipping.\n");
> +      writes.release ();
> +      reads.release ();
> +      return;
> +    }
> +
> +  /*
> +   * Note:
> +   * Limiting ourselves to only one write is not necessary in general,
> +   * but good enough to address the typical init () case.
> +   * Big advantage is, that it makes the following code much easier.
> +   */
> +
> +  /* Get (only) write ref.  */
> +  write = writes.pop ();
> +
> +  /* Check if write's RHS is a constant and get it.  */
> +  write_val = extract_constant_from_initcall_write (write);
> +  if (write_val == NULL_TREE)
> +    {
> +      log ("Write's RHS is not constant -> skipping.\n");
> +      writes.release ();
> +      reads.release ();
> +      return;
> +    }
> +
> +  /* Assure all reads are after the write.  */
> +  if (!write_before_all_reads (write, reads))
> +    {
> +      log ("Write not guaranteed to be before read -> skipping.\n");
> +      writes.release ();
> +      reads.release ();
> +      return;
> +    }
> +
> +  /* Propagate constant to reads.  */
> +  if (!flag_initcall_cp_dryrun)
> +    propagate_constant_to_reads (write_val, reads, update_functions);
> +
> +  /* Finally remove the write.  */
> +  gsi = gsi_for_stmt (write->stmt);
> +  remove_permanently = false; // XXX: fails with true?
> +  // gsi_remove (&gsi, remove_permanently);
> +
> +  log ("Eliminated variable '%s'.\n\n", vnode->name ());
> +
> +  writes.release ();
> +  reads.release ();
> +}
> +
> +/* Replace functions with clones */
> +bool
> +update_callgraph (cgraph_node *const &r_cnode, cgraph_node **clone_ptr, void *)
> +{
> +  vec<cgraph_edge *> callers = r_cnode->collect_callers ();
> +  cgraph_node *clone = *clone_ptr;
> +  cgraph_edge *e;
> +  int i;
> +  profile_count count = r_cnode->count;
> +
> +  FOR_EACH_VEC_ELT (callers, i, e)
> +    e->redirect_callee (clone);
> +
> +  for (e = clone->callers; e; e = e->next_caller)
> +    {
> +      e->caller->get_untransformed_body ();
> +      function *inner_function = DECL_STRUCT_FUNCTION (e->caller->decl);
> +      gimple_call_set_fndecl (e->call_stmt, clone->decl);
> +      maybe_clean_eh_stmt_fn (inner_function, e->call_stmt);
> +    }
> +
> +  r_cnode->skip_ipa_cp = true;
> +  push_cfun (DECL_STRUCT_FUNCTION (r_cnode->decl));
> +
> +  log ("dumping function %s\n", r_cnode->dump_asm_name ());
> +  function *func = DECL_STRUCT_FUNCTION (r_cnode->decl);
> +  basic_block bb;
> +  FOR_EACH_BB_FN (bb, func)
> +    {
> +      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
> +          gsi_next (&gsi))
> +       {
> +         gimple *stmt = gsi_stmt (gsi);
> +         if (dump_file)
> +           print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
> +       }
> +    }
> +  if (dom_info_available_p (CDI_DOMINATORS))
> +    free_dominance_info (CDI_DOMINATORS);
> +  pop_cfun ();
> +  return true;
> +}
> +
> +static unsigned int
> +ipa_initcall_cp_execute (void)
> +{
> +  varpool_node *vnode;
> +
> +  clone_num_suffixes1 = new hash_map<const char *, unsigned>;
> +  hash_set<cgraph_node *> update_functions;
> +  func_to_clone = new hash_map<cgraph_node *, cgraph_node *>;
> +  FOR_EACH_VARIABLE (vnode)
> +    {
> +      ipa_initcall_cp_execute_for_var (vnode, &update_functions);
> +    }
> +
> +  if (!flag_initcall_cp_dryrun)
> +    func_to_clone->traverse<void *, update_callgraph> (NULL);
> +
> +  delete clone_num_suffixes1;
> +  delete func_to_clone;
> +
> +  return 0;
> +}
> +
> +namespace {
> +
> +const pass_data pass_data_ipa_initcall_cp = {
> +  SIMPLE_IPA_PASS,
> +  "initcall-cp",
> +  OPTGROUP_NONE,
> +  TV_NONE,
> +  (PROP_cfg | PROP_ssa),
> +  0,
> +  0,
> +  0,
> +  (TODO_update_ssa | TODO_cleanup_cfg | TODO_dump_symtab
> +   | TODO_rebuild_cgraph_edges | TODO_discard_function),
> +};
> +
> +class pass_ipa_initcall_cp : public ipa_opt_pass_d
> +{
> +public:
> +  pass_ipa_initcall_cp (gcc::context *ctxt)
> +    : ipa_opt_pass_d (pass_data_ipa_initcall_cp, ctxt, NULL, NULL, NULL, NULL,
> +                     NULL, NULL, 0, NULL, NULL)
> +  {}
> +
> +  /* opt_pass methods: */
> +  virtual bool gate (function *)
> +  {
> +    return in_lto_p && (flag_initcall_cp || flag_initcall_cp_dryrun);
> +  }
> +
> +  virtual unsigned int execute (function *)
> +  {
> +    return ipa_initcall_cp_execute ();
> +  }
> +
> +}; // class pass_ipa_initcall_cp
> +
> +} // namespace
> +
> +ipa_opt_pass_d *
> +make_pass_ipa_initcall_cp (gcc::context *ctxt)
> +{
> +  return new pass_ipa_initcall_cp (ctxt);
> +}
> diff --git a/gcc/passes.def b/gcc/passes.def
> index 2bf2cb78fc5..62609951bac 100644
> --- a/gcc/passes.def
> +++ b/gcc/passes.def
> @@ -149,6 +149,7 @@ along with GCC; see the file COPYING3.  If not see
>     NEXT_PASS (pass_ipa_profile);
>     NEXT_PASS (pass_ipa_icf);
>     NEXT_PASS (pass_ipa_devirt);
> +  NEXT_PASS (pass_ipa_initcall_cp);
>     NEXT_PASS (pass_ipa_cp);
>     NEXT_PASS (pass_ipa_sra);
>     NEXT_PASS (pass_ipa_cdtor_merge);
> diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
> index a1207a20a3c..65e09c657b7 100644
> --- a/gcc/tree-pass.h
> +++ b/gcc/tree-pass.h
> @@ -501,6 +501,7 @@ extern ipa_opt_pass_d *make_pass_ipa_fn_summary (gcc::context *ctxt);
>   extern ipa_opt_pass_d *make_pass_ipa_inline (gcc::context *ctxt);
>   extern simple_ipa_opt_pass *make_pass_ipa_free_lang_data (gcc::context *ctxt);
>   extern simple_ipa_opt_pass *make_pass_ipa_free_fn_summary (gcc::context *ctxt);
> +extern ipa_opt_pass_d *make_pass_ipa_initcall_cp (gcc::context *ctxt);
>   extern ipa_opt_pass_d *make_pass_ipa_cp (gcc::context *ctxt);
>   extern ipa_opt_pass_d *make_pass_ipa_sra (gcc::context *ctxt);
>   extern ipa_opt_pass_d *make_pass_ipa_icf (gcc::context *ctxt);
> --
> 2.18.1
>
diff mbox series

Patch

diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 543b477ff18..b94fb633d78 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1401,6 +1401,7 @@  OBJS = \
  	internal-fn.o \
  	ipa-cp.o \
  	ipa-sra.o \
+	ipa-initcall-cp.o \
  	ipa-devirt.o \
  	ipa-fnsummary.o \
  	ipa-polymorphic-call.o \
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index 5ddeb65269b..532b4671609 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -937,7 +937,7 @@  struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : 
public symtab_node
        split_part (false), indirect_call_target (false), local (false),
        versionable (false), can_change_signature (false),
        redefined_extern_inline (false), tm_may_enter_irr (false),
-      ipcp_clone (false), m_uid (uid), m_summary_id (-1)
+      ipcp_clone (false), skip_ipa_cp (false), m_uid (uid), 
m_summary_id (-1)
    {}

    /* Remove the node from cgraph and all inline clones inlined into it.
@@ -1539,6 +1539,9 @@  struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node 
: public symtab_node
    unsigned tm_may_enter_irr : 1;
    /* True if this was a clone created by ipa-cp.  */
    unsigned ipcp_clone : 1;
+  /* True if ipa-initcall-cp and therefore we need to skip ipa-cp for 
cgraph
+   * node.  */
+  unsigned skip_ipa_cp : 1;

  private:
    /* Unique id of the node.  */
diff --git a/gcc/common.opt b/gcc/common.opt
index d33383b523c..b2d8d1b0958 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -3408,4 +3408,12 @@  fipa-ra
  Common Report Var(flag_ipa_ra) Optimization
  Use caller save register across calls if possible.

+fipa-initcall-cp-dryrun
+Common Report Var(flag_initcall_cp_dryrun)
+Run analysis for propagating constants across initialization functions.
+
+fipa-initcall-cp
+Common Report Var(flag_initcall_cp) Optimization
+Run transformation for propagation constants across initialization 
functions.
+
  ; This comment is to ensure we retain the blank line above.
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index c64e9104a94..1036cb0684e 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -1188,7 +1188,7 @@  initialize_node_lattices (struct cgraph_node *node)

    gcc_checking_assert (node->has_gimple_body_p ());

-  if (!ipa_get_param_count (info))
+  if (!ipa_get_param_count (info) || node->skip_ipa_cp)
      disable = true;
    else if (node->local)
      {
diff --git a/gcc/ipa-initcall-cp.c b/gcc/ipa-initcall-cp.c
new file mode 100644
index 00000000000..02f70b7a908
--- /dev/null
+++ b/gcc/ipa-initcall-cp.c
@@ -0,0 +1,1199 @@ 
+/* Initcall constant propagation pass.
+   Copyright (C) 2019-2020 Free Software Foundation, Inc.
+
+   Contributed by Erick Ochoa <erick.ochoa@theobroma-systems.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "tree-eh.h"
+#include "gimple.h"
+#include "gimple-expr.h"
+#include "gimple-iterator.h"
+#include "predict.h"
+#include "alloc-pool.h"
+#include "tree-pass.h"
+#include "cgraph.h"
+#include "diagnostic.h"
+#include "fold-const.h"
+#include "gimple-fold.h"
+#include "symbol-summary.h"
+#include "tree-vrp.h"
+#include "ipa-prop.h"
+#include "tree-pretty-print.h"
+#include "tree-inline.h"
+#include "ipa-fnsummary.h"
+#include "ipa-utils.h"
+#include "tree-ssa-ccp.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "gimple-pretty-print.h"
+#include "ssa.h"
+
+#define INITCALL_CP_SUFFIX "initcall.cp"
+
+typedef vec<struct ipa_ref *> ipa_ref_vec;
+
+/* log to dump file */
+static inline void
+log (const char *const fmt, ...)
+{
+  if (!dump_file)
+    return;
+
+  va_list args;
+  va_start (args, fmt);
+  vfprintf (dump_file, fmt, args);
+  va_end (args);
+}
+
+bool
+reach_nodes_via_bb1 (cgraph_node *n2);
+static bool
+bb1_dominates_bb2 (basic_block, basic_block, cgraph_node *);
+static bool
+write_before_call (struct ipa_ref *, struct ipa_ref *);
+static bool
+call_before_read (struct ipa_ref *, struct ipa_ref *);
+static hash_map<const char *, unsigned> *clone_num_suffixes1;
+static hash_map<cgraph_node *, cgraph_node *> *func_to_clone;
+static vec<struct cgraph_node *>
+get_nondominated_callees (cgraph_node *caller, cgraph_node *callee,
+			  bool *exitEarly = NULL);
+
+static void
+load_function_body_of_ipa_ref (cgraph_node *node)
+{
+  gcc_assert (in_lto_p);
+  cgraph_node *f_cnode2 = node->ultimate_alias_target ();
+  const char *name = f_cnode2->dump_asm_name ();
+  log ("%s: for function '%s'\n", __func__, name);
+  if (dump_file)
+    {
+      dump_node (f_cnode2->decl, TDF_DETAILS, dump_file);
+    }
+  f_cnode2->get_untransformed_body ();
+}
+
+static void
+load_function_body_of_ipa_ref (struct ipa_ref *ref)
+{
+  /* IPA passes don't get the function bodies during LTO.  */
+  gcc_assert (in_lto_p);
+
+  symtab_node *f_node = ref->referring;
+  cgraph_node *f_cnode = dyn_cast<cgraph_node *> (f_node);
+  load_function_body_of_ipa_ref (f_cnode);
+}
+
+static void
+dump_vnode_ipa_ref (ipa_ref *ref)
+{
+  if (!dump_file)
+    return;
+  log ("Reference type: %s\n", stringify_ipa_ref_use (ref->use));
+
+  symtab_node *f_node = ref->referring;
+  log ("Reference function: %s\n", f_node->dump_asm_name ());
+
+  log ("Gimple stmt (@0x%lx, lto_stmt_uid: %u):\n", (long) ref->stmt,
+       ref->lto_stmt_uid);
+  load_function_body_of_ipa_ref (ref);
+  print_gimple_stmt (dump_file, ref->stmt, 0, TDF_NONE);
+}
+
+/* Return true of all ops of assignment are constants.  */
+static bool
+gimple_assign_is_single_const (gimple *stmt)
+{
+  tree op;
+
+  gcc_assert (gimple_code (stmt) == GIMPLE_ASSIGN);
+
+  if (gimple_has_volatile_ops (stmt))
+    {
+      log ("has volatile ops!\n");
+      return false;
+    }
+
+  if (gimple_num_ops (stmt) != 2)
+    {
+      log ("wrong num of ops: %u!\n", gimple_num_ops (stmt));
+      return false;
+    }
+
+  op = gimple_op (stmt, 1);
+  if (!tree_code_is_cst (op))
+    {
+      log ("op is not cst!\n");
+      return false;
+    }
+
+  return true;
+}
+
+/* Collects calls which are not dominated by callee */
+// assumptions:
+//  * there is a callsite from caller to callee
+static vec<struct cgraph_node *>
+get_nondominated_callees (cgraph_node *caller, cgraph_node *callee,
+			  bool *exitEarly)
+{
+  gcc_assert (in_lto_p);
+  caller->get_untransformed_body ();
+
+  function *func = DECL_STRUCT_FUNCTION (caller->decl);
+  basic_block bb;
+  bool found = false;
+  FOR_EACH_BB_FN (bb, func)
+    {
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+	   gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  if (gimple_code (stmt) != GIMPLE_CALL)
+	    continue;
+
+	  if (dump_file)
+	    print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
+	  tree rhs = gimple_op (stmt, 1);
+	  if (TREE_CODE (rhs) == RECORD_TYPE)
+	    {
+	      gcc_assert (exitEarly);
+	      *exitEarly = true;
+	      return vNULL;
+	    }
+	  tree callee_decl = gimple_call_fndecl (stmt);
+	  if (!callee_decl)
+	    {
+	      gcc_assert (exitEarly);
+	      *exitEarly = true;
+	      return vNULL;
+	    }
+	  cgraph_node *callee_node = cgraph_node::get (callee_decl);
+	  if (callee_node != callee)
+	    continue;
+
+	  found = true;
+	}
+      if (found)
+	break;
+    }
+
+  gcc_assert (found);
+
+  // At this point in the program, we hold a valid bb.
+  // The callee is located in the bb
+  vec<struct cgraph_node *> callees = vNULL;
+  basic_block bb_c;
+  FOR_EACH_BB_FN (bb_c, func)
+    {
+      bool self_dominates = bb == bb_c;
+      bool bb_dominates_bbc = bb1_dominates_bb2 (bb, bb_c, caller);
+      if (bb_dominates_bbc && !self_dominates)
+	continue;
+
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bb_c); !gsi_end_p 
(gsi);
+	   gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  if (gimple_code (stmt) != GIMPLE_CALL)
+	    continue;
+
+	  tree callee_decl = gimple_call_fndecl (stmt);
+	  cgraph_node *callee_node = cgraph_node::get (callee_decl);
+
+	  if (fndecl_built_in_p (callee_node->decl))
+	    continue;
+
+	  if (self_dominates && callee_node == callee)
+	    {
+	      break;
+	    }
+
+	  callees.safe_push (callee_node);
+	}
+    }
+  return callees;
+}
+
+/* Returns true if bb1 dominates bb2
+ * Includes self-dominance */
+static bool
+bb1_dominates_bb2 (basic_block bb1, basic_block bb2, cgraph_node *cnode)
+{
+  // self dominance
+  if (bb1 == bb2)
+    return true;
+
+  push_cfun (DECL_STRUCT_FUNCTION (cnode->decl));
+
+  /* If dominator info is not available, we need to calculate it.  */
+  if (!dom_info_available_p (CDI_DOMINATORS))
+    calculate_dominance_info (CDI_DOMINATORS);
+
+  /* Check if the read is dominated by the write.  */
+  bool ret = dominated_by_p (CDI_DOMINATORS, bb2, bb1);
+
+  /* Restore cfun.  */
+  pop_cfun ();
+
+  return ret;
+}
+
+/* Return true if write occurs before read.
+ * write and read must be located in the same function
+ * */
+static bool
+write_before_read_in_function (struct ipa_ref *write, struct ipa_ref *read)
+{
+  basic_block w_bb = gimple_bb (write->stmt);
+  basic_block r_bb = gimple_bb (read->stmt);
+
+  if (w_bb != r_bb)
+    {
+      /*
+       * The dominator framework operates on cfun.
+       * Therefore we need to set cfun accordingly.
+       */
+      symtab_node *w_node = write->referring;
+      cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
+      return bb1_dominates_bb2 (w_bb, r_bb, w_cnode);
+    }
+
+  gimple_stmt_iterator gsi;
+  for (gsi = gsi_start_bb (w_bb); !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      if (gsi_stmt (gsi) == write->stmt)
+	return true;
+      if (gsi_stmt (gsi) == read->stmt)
+	return false;
+    }
+
+  /* Neither write nor read found it BB.  */
+  gcc_unreachable ();
+  return false;
+}
+
+/*
+ * DFS over callees and return if callee is a or b.
+ */
+static cgraph_node *
+find_cgraph_in_callee_set (cgraph_node *n, hash_set<cgraph_node *> set,
+			   cgraph_node *a, cgraph_node *b)
+{
+  cgraph_edge *cs;
+  for (cs = n->callees; cs; cs = cs->next_callee)
+    {
+      cgraph_node *callee = cs->callee->function_symbol ();
+      const char *const name = n->dump_asm_name ();
+      const char *const cname = callee->dump_asm_name ();
+      log ("Child of %s: %s\n", name, cname);
+      if (callee == a)
+	return a;
+      if (callee == b)
+	return b;
+      if (!set.contains (callee))
+	continue;
+      return find_cgraph_in_callee_set (callee, set, a, b);
+    }
+  return NULL;
+}
+
+/* Walks back along caller relations until main is found.  */
+static cgraph_node *
+get_ancestor_main_dfs (hash_set<cgraph_node *> *visited,
+		       vec<cgraph_node *> *path, cgraph_node *node)
+{
+  if (MAIN_NAME_P (DECL_NAME (node->decl)))
+    {
+      path->safe_push (node);
+      return node;
+    }
+
+  visited->add (node);
+
+  cgraph_edge *cs;
+  for (cs = node->callers; cs; cs = cs->next_caller)
+    {
+      cgraph_node *caller = cs->caller->function_symbol ();
+      if (visited->contains (caller))
+	continue;
+
+      cgraph_node *main = get_ancestor_main_dfs (visited, path, caller);
+
+      if (!main)
+	continue;
+
+      path->safe_push (node);
+      return main;
+    }
+  return NULL;
+}
+
+/* Returns path from main to cgraph_node in the vector */
+static cgraph_node *
+get_path_from_main_to (cgraph_node *node, vec<cgraph_node *> *path)
+{
+  hash_set<cgraph_node *> visited;
+  cgraph_node *main = get_ancestor_main_dfs (&visited, path, node);
+  visited.empty ();
+  return main;
+}
+
+/*
+ * Verifying that a stmt s1 is dominated by a stmt s2 across function
+ * borders is not trivial with the available infrastructure (dominator
+ * algorithm on bb level plus cgraph).  If we rule out external
+ * calls/callbacks, then we still need to guarantee a write on each
+ * possible path to the read.
+ *
+ * The implemented solution, which is of course stricter than
+ * necessary but not too compute/memory intensive, is as follows:
+ *
+ * - Check if the write is reachable from main () by only looking into
+ *   the first bb of each function on the path.
+ * - All call stmts between main () and the write must not possibly
+ *   reach a read.  We consider indirect call statements as possibly
+ *   reaching a read (unless we can prove the opposite).
+ *
+ * External calls/callbacks are ruled out as follows:
+ *
+ * - all possible ancestors of the read must not be externally visible
+ * - all possible ancestors of the read must not be function pointers
+ * - Something else?
+ */
+static bool
+write_before_read_across_function_borders (struct ipa_ref *write,
+					   struct ipa_ref *read)
+{
+  symtab_node *w_node = write->referring;
+  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
+  symtab_node *r_node = read->referring;
+  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
+  cgraph_node *main;
+
+  /* Get main () function.  */
+  /* auto_vec releases the path vectors on every return below.  */
+  auto_vec<cgraph_node *> pw;
+  main = get_path_from_main_to (w_cnode, &pw);
+  if (!main)
+    {
+      log ("main () is not ancestor of write -> skipping.\n");
+      return false;
+    }
+
+  auto_vec<cgraph_node *> pr;
+  cgraph_node *main_to_read = get_path_from_main_to (r_cnode, &pr);
+  if (!main_to_read)
+    {
+      log ("main () is not ancestor of read -> skipping.\n");
+      return false;
+    }
+
+  /* Future work: also consider unconditional paths, not only the
+     first basic blocks.  */
+  if (!reach_nodes_via_bb1 (w_cnode))
+    return false;
+
+  int i;
+  cgraph_node *node_in_pr;
+  FOR_EACH_VEC_ELT (pr, i, node_in_pr)
+    {
+      /* main () itself is necessarily externally visible, so don't
+	 reject it.  */
+      if (node_in_pr == main)
+	continue;
+
+      /* Assure that all paths to read are not externally visible.  */
+      if (cgraph_externally_visible_p (node_in_pr, flag_whole_program))
+	{
+	  const char *const name = node_in_pr->dump_asm_name ();
+	  log ("%s is externally visible...  skipping\n", name);
+	  return false;
+	}
+
+      /* Assure that all paths to read are not
+       * used as function pointers.  */
+      if (node_in_pr->address_taken)
+	{
+	  const char *const name = node_in_pr->dump_asm_name ();
+	  log ("%s might be function pointer...  skipping\n", name);
+	  return false;
+	}
+    }
+
+  cgraph_node *caller = main;
+  cgraph_node *node_in_pw;
+  FOR_EACH_VEC_ELT (pw, i, node_in_pw)
+    {
+      gcc_assert (w_cnode != r_cnode);
+      if (node_in_pw == r_cnode && path_exists (r_cnode, w_cnode))
+	{
+	  return call_before_read (write, read);
+	}
+
+      if (node_in_pw == w_cnode && path_exists (w_cnode, r_cnode))
+	{
+	  return write_before_call (write, read);
+	}
+
+      if (node_in_pw == main)
+	{
+	  continue;
+	}
+
+      const char *const cname = caller->dump_asm_name ();
+      const char *const nname = node_in_pw->dump_asm_name ();
+      log ("get_nondominated_callees from %s to %s\n", cname, nname);
+
+      bool exit_early = false;
+      vec<cgraph_node *> non_dominated_callees
+	= get_nondominated_callees (caller, node_in_pw, &exit_early);
+      if (exit_early)
+	{
+	  non_dominated_callees.release ();
+	  return false;
+	}
+      cgraph_node *non_dominated_callee;
+      int j;
+      bool may_reach_read = false;
+      FOR_EACH_VEC_ELT (non_dominated_callees, j, non_dominated_callee)
+	{
+	  const char *const ndname = non_dominated_callee->dump_asm_name ();
+	  log ("callee %d %s\n", j, ndname);
+	  if (path_exists (non_dominated_callee, r_cnode))
+	    {
+	      may_reach_read = true;
+	      break;
+	    }
+	}
+      non_dominated_callees.release ();
+      if (may_reach_read)
+	return false;
+
+      caller = node_in_pw;
+    }
+  return true;
+}
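+
+/* A sketch of the intended positive case (illustrative program, not
+   from the patch's tests):
+
+     static int limit;
+     static void init (void) { limit = 100; }
+     static int use (void) { return limit; }
+     int main (void) { init (); return use (); }
+
+   Here pw = {main, init}; the call to init () dominates the call to
+   use (), so main has no non-dominated callee reaching the read and
+   the write is considered to execute first.  */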
+
+/* Return the callees called from C's first basic block that may
+   reach W_CNODE.  */
+static vec<struct cgraph_node *>
+get_bb1_callees (cgraph_node *c, cgraph_node *w_cnode)
+{
+  vec<struct cgraph_node *> callees = vNULL;
+  if (fndecl_built_in_p (c->decl))
+    return vNULL;
+
+  const char *cname = c->dump_asm_name ();
+  log ("before get_untransformed_body %s\n", cname);
+
+  if (!c->definition)
+    return vNULL;
+
+  push_cfun (DECL_STRUCT_FUNCTION (c->decl));
+
+  gcc_assert (in_lto_p);
+  c->get_untransformed_body ();
+
+  pop_cfun ();
+
+  function *func = DECL_STRUCT_FUNCTION (c->decl);
+  /* Get first BB (after the fake entry BB).  */
+  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (func)->next_bb;
+
+  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+       gsi_next (&gsi))
+    {
+      gimple *stmt = gsi_stmt (gsi);
+
+      if (gimple_code (stmt) != GIMPLE_CALL)
+	continue;
+
+      /* Skip indirect calls and callees without a cgraph node; missing
+	 an edge here only makes the reachability check more
+	 conservative.  */
+      tree callee_decl = gimple_call_fndecl (stmt);
+      if (!callee_decl)
+	continue;
+      cgraph_node *callee_node = cgraph_node::get (callee_decl);
+      if (!callee_node || !path_exists (callee_node, w_cnode))
+	continue;
+
+      callees.safe_push (callee_node);
+    }
+
+  return callees;
+}
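+
+/* E.g. for main () { init_all (); ... }, init_all ()'s node is
+   returned iff its call sits in main's first basic block and
+   init_all () can reach W_CNODE.  */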
+
+/* Return true if N2 is reachable from N1 by visiting only first
+   basic blocks.  */
+static bool
+reach_nodes_via_bb1_dfs (hash_set<cgraph_node *> *visited, cgraph_node *n1,
+			 cgraph_node *n2)
+{
+  if (n1 == n2)
+    return true;
+
+  visited->add (n1);
+
+  vec<struct cgraph_node *> callees = get_bb1_callees (n1, n2);
+
+  int i;
+  cgraph_node *callee;
+  bool found = false;
+  FOR_EACH_VEC_ELT (callees, i, callee)
+    {
+      if (visited->contains (callee))
+	continue;
+      if (reach_nodes_via_bb1_dfs (visited, callee, n2))
+	{
+	  found = true;
+	  break;
+	}
+    }
+
+  /* Release in all cases, not only when N2 was found.  */
+  callees.release ();
+  return found;
+}
+
+/* Return true if N2 is reachable from the main function by following
+   call graph nodes reachable within first basic blocks.  */
+bool
+reach_nodes_via_bb1 (cgraph_node *n2)
+{
+  vec<cgraph_node *> pr = vNULL;
+  cgraph_node *main = get_path_from_main_to (n2, &pr);
+  pr.release ();
+  if (!main)
+    return false;
+  hash_set<cgraph_node *> visited;
+  return reach_nodes_via_bb1_dfs (&visited, main, n2);
+}
+
+/* Determine if the write occurs before every call that may reach the
+   read.  */
+static bool
+write_before_call (struct ipa_ref *write, struct ipa_ref *read)
+{
+  symtab_node *w_node = write->referring;
+  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
+  symtab_node *r_node = read->referring;
+  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
+
+  gcc_assert (path_exists (w_cnode, r_cnode));
+  gcc_assert (w_cnode != r_cnode);
+
+  basic_block w_bb = gimple_bb (write->stmt);
+
+  gcc_assert (in_lto_p);
+  w_cnode->get_untransformed_body ();
+
+  function *func = DECL_STRUCT_FUNCTION (w_cnode->decl);
+  basic_block c_bb;
+  auto_vec<cgraph_node *> callees;
+  FOR_EACH_BB_FN (c_bb, func)
+    {
+      bool self_dominates = w_bb == c_bb;
+      bool w_bb_dominates_c_bb = bb1_dominates_bb2 (w_bb, c_bb, w_cnode);
+      if (w_bb_dominates_c_bb && !self_dominates)
+	continue;
+
+      for (gimple_stmt_iterator gsi = gsi_start_bb (c_bb); !gsi_end_p (gsi);
+	   gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+
+	  if (stmt == write->stmt)
+	    {
+	      break;
+	    }
+
+	  if (gimple_code (stmt) != GIMPLE_CALL)
+	    {
+	      continue;
+	    }
+	  if (dump_file)
+	    print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
+	  /* Indirect calls have no fndecl; treat them as possibly
+	     reaching the read.  */
+	  tree callee_decl = gimple_call_fndecl (stmt);
+	  if (!callee_decl)
+	    return false;
+
+	  cgraph_node *callee_node = cgraph_node::get (callee_decl);
+	  if (!callee_node)
+	    return false;
+
+	  /* Be conservative around setjmp-like control flow.  */
+	  const char *identifier
+	    = IDENTIFIER_POINTER (DECL_NAME (callee_node->decl));
+	  if (strstr (identifier, "sigsetjmp") != NULL)
+	    return false;
+
+	  if (fndecl_built_in_p (callee_node->decl))
+	    continue;
+
+	  log ("found callee %s\n", callee_node->dump_asm_name ());
+	  callees.safe_push (callee_node);
+	}
+    }
+  cgraph_node *callee;
+  int i;
+  FOR_EACH_VEC_ELT (callees, i, callee)
+    {
+      if (path_exists (callee, r_cnode))
+	{
+	  return false;
+	}
+    }
+
+  return true;
+}
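+
+/* Illustrative counter-example: in
+
+     f () { g (); limit = 100; }
+
+   where the read is reachable from g (), the call to g () precedes
+   the write in the same block, so the function returns false.  */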
+
+/* Determine if a call that reaches the write occurs before the
+   read.  */
+static bool
+call_before_read (struct ipa_ref *write, struct ipa_ref *read)
+{
+  symtab_node *w_node = write->referring;
+  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
+  symtab_node *r_node = read->referring;
+  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
+
+  gcc_assert (path_exists (r_cnode, w_cnode));
+  gcc_assert (w_cnode != r_cnode);
+
+  basic_block r_bb = gimple_bb (read->stmt);
+
+  gcc_assert (in_lto_p);
+  r_cnode->get_untransformed_body ();
+
+  function *func = DECL_STRUCT_FUNCTION (r_cnode->decl);
+  basic_block c_bb;
+  FOR_EACH_BB_FN (c_bb, func)
+    {
+      bool self_dominates = c_bb == r_bb;
+      bool call_dominates_read = bb1_dominates_bb2 (c_bb, r_bb, r_cnode);
+      if (!call_dominates_read && !self_dominates)
+	continue;
+
+      for (gimple_stmt_iterator gsi = gsi_start_bb (c_bb); !gsi_end_p (gsi);
+	   gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+
+	  // self dominance
+	  if (stmt == read->stmt)
+	    break;
+
+	  if (gimple_code (stmt) != GIMPLE_CALL)
+	    continue;
+
+	  if (dump_file)
+	    print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
+	  /* Indirect calls have no fndecl; treat them
+	     conservatively.  */
+	  tree callee_decl = gimple_call_fndecl (stmt);
+	  if (!callee_decl)
+	    return false;
+
+	  cgraph_node *callee_node = cgraph_node::get (callee_decl);
+	  if (!callee_node)
+	    return false;
+
+	  /* Be conservative around setjmp-like control flow.  */
+	  const char *identifier
+	    = IDENTIFIER_POINTER (DECL_NAME (callee_node->decl));
+	  if (strstr (identifier, "sigsetjmp") != NULL)
+	    return false;
+
+	  if (path_exists (callee_node, w_cnode))
+	    return true;
+	}
+    }
+  return false;
+}
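+
+/* Illustrative example: in
+
+     h () { init (); tmp = limit; }
+
+   where init () reaches the write, the call dominates the read (here:
+   same block, earlier statement), so the function returns true.  */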
+
+/* Determine if the write occurs before the read.  */
+static bool
+write_before_read (struct ipa_ref *write, struct ipa_ref *read)
+{
+  symtab_node *w_node = write->referring;
+  cgraph_node *w_cnode = dyn_cast<cgraph_node *> (w_node);
+  symtab_node *r_node = read->referring;
+  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
+
+  if (w_cnode == r_cnode)
+    /* Within the same function.  */
+    return write_before_read_in_function (write, read);
+
+  /* Not within the same function.  */
+  return write_before_read_across_function_borders (write, read);
+}
+
+/* Determine if the write occurs before all reads.  */
+static bool
+write_before_all_reads (struct ipa_ref *write, const ipa_ref_vec &reads)
+{
+  int i;
+  struct ipa_ref *read;
+
+  FOR_EACH_VEC_ELT (reads, i, read)
+    {
+      load_function_body_of_ipa_ref (read);
+      if (!write_before_read (write, read))
+	return false;
+    }
+  return true;
+}
+
+/* Replace the load in REF with the constant WRITE_VAL in a clone of
+   the reading function.  */
+static void
+propagate_constant_to_read (tree write_val, struct ipa_ref *ref,
+			    hash_set<cgraph_node *> *func)
+{
+  gcc_assert (ref->use == IPA_REF_LOAD);
+  load_function_body_of_ipa_ref (ref);
+
+  gimple *read_stmt = ref->stmt;
+
+  gcc_assert (gimple_code (read_stmt) == GIMPLE_ASSIGN);
+  gcc_assert (gimple_num_ops (read_stmt) == 2);
+
+  symtab_node *r_node = ref->referring;
+  cgraph_node *r_cnode = dyn_cast<cgraph_node *> (r_node);
+
+  cgraph_node **possible_clone = func_to_clone->get (r_cnode);
+  if (!possible_clone)
+    {
+      static const char *const suffix = INITCALL_CP_SUFFIX;
+      /* The clone must be created without callers; otherwise we would
+	 end up analyzing the clones as well.  The callers are
+	 redirected later, in update_callgraph ().  */
+      cgraph_node *clone
+	= r_cnode->create_version_clone_with_body (vNULL, NULL, NULL, NULL,
+						   NULL, suffix, NULL);
+      clone->get_untransformed_body ();
+      func_to_clone->put (r_cnode, clone);
+    }
+
+  possible_clone = func_to_clone->get (r_cnode);
+  cgraph_node *clone = *possible_clone;
+
+  /* Build the new stmt and replace the old one.  */
+  gimple_stmt_iterator gsi;
+  basic_block bb;
+
+  push_cfun (DECL_STRUCT_FUNCTION (clone->decl));
+  function *clone_func = DECL_STRUCT_FUNCTION (clone->decl);
+  bool found = false;
+  FOR_EACH_BB_FN (bb, clone_func)
+    {
+      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
+	    continue;
+	  tree old_val_clone = gimple_op (stmt, 1);
+	  tree lhs = gimple_op (stmt, 0);
+
+	  if (TREE_CODE (old_val_clone) != VAR_DECL)
+	    continue;
+
+	  tree old_val = gimple_op (read_stmt, 1);
+	  /* Identifier nodes are shared, so pointer equality of the
+	     DECL_NAMEs is a sufficient comparison.  */
+	  if (DECL_NAME (old_val_clone) != DECL_NAME (old_val))
+	    continue;
+
+	  found = true;
+
+	  gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
+	  tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
+	  gimple *new_read_stmt = gimple_build_assign (new_lhs, write_val);
+	  gsi_insert_before (&gsi2, new_read_stmt, GSI_SAME_STMT);
+
+	  gimple *use_stmt;
+	  imm_use_iterator use_iter;
+	  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
+	    {
+	      use_operand_p use_p;
+	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
+		SET_USE (use_p, new_lhs);
+	      update_stmt (use_stmt);
+	    }
+	}
+
+      if (found)
+	break;
+    }
+  gcc_assert (found);
+  pop_cfun ();
+}
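+
+/* Sketch of the transformation (clone name assumed): in a clone
+   use.initcall.cp () of use (), the load
+
+     tmp_1 = limit;
+
+   is preceded by a new tmp_2 = 100, all uses of tmp_1 are redirected
+   to tmp_2, and the now-dead load is left for later DCE.  */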
+
+/* Collect the STORE and LOAD references to VNODE into WRITES and
+   READS.  Return false if the variable is statically initialized.  */
+static bool
+ipa_initcall_get_writes_and_reads (varpool_node *vnode, ipa_ref_vec *writes,
+				   ipa_ref_vec *reads)
+{
+  int i;
+  struct ipa_ref *ref;
+
+  log ("%s for variable '%s'.\n", __func__, vnode->name ());
+
+  /* Only IPA_REF_STORE and IPA_REF_LOAD left.  */
+  for (i = 0; vnode->iterate_referring (i, ref); i++)
+    {
+      symtab_node *f_node = ref->referring;
+      /* The dyn_cast fails if the reference does not come from a
+	 function body, i.e. the variable is statically initialized.  */
+      cgraph_node *f_cnode = dyn_cast<cgraph_node *> (f_node);
+
+      if (!f_cnode)
+	{
+	  log ("skipping variable %s due to static initialization\n",
+	       vnode->name ());
+	  return false;
+	}
+
+      /* Ignore references made from clones created by this pass.  */
+      const char *identifier
+	= IDENTIFIER_POINTER (DECL_NAME (f_cnode->decl));
+      static const char *const suffix = INITCALL_CP_SUFFIX;
+      if (strstr (identifier, suffix) != NULL)
+	continue;
+
+      if (ref->use == IPA_REF_STORE)
+	writes->safe_push (ref);
+
+      if (ref->use == IPA_REF_LOAD)
+	reads->safe_push (ref);
+    }
+  return true;
+}
+
+/* Propagate the constant WRITE_VAL to all reads in READS_ORIGINAL.  */
+static void
+propagate_constant_to_reads (tree write_val,
+			     const ipa_ref_vec &reads_original,
+			     hash_set<cgraph_node *> *funcs)
+{
+  /* Iterate over reads and replace them by constant.  */
+  struct ipa_ref *ref;
+  int i;
+  FOR_EACH_VEC_ELT (reads_original, i, ref)
+    {
+      propagate_constant_to_read (write_val, ref, funcs);
+    }
+}
+
+/*
+ * Extract the assigned constant iff the given statement
+ * is a constant assignment.  Return NULL_TREE otherwise.
+ */
+static tree
+extract_constant_from_initcall_write (struct ipa_ref *write)
+{
+  symtab_node *f_node = write->referring;
+  cgraph_node *f_cnode = dyn_cast<cgraph_node *> (f_node);
+
+  tree decl = f_cnode->decl;
+  if (TREE_CODE (decl) != FUNCTION_DECL)
+    {
+      log ("Not function decl -> skipping.\n");
+      return NULL_TREE;
+    }
+
+  if (!f_cnode->has_gimple_body_p ())
+    log ("Writer does not have a gimple body!\n");
+  if (f_cnode->inlined_to)
+    log ("Writer was inlined into its caller.\n");
+
+  log ("%s: for writer gimple:\n", __func__);
+  dump_vnode_ipa_ref (write);
+
+  /* Get the write function's body.  */
+  load_function_body_of_ipa_ref (write);
+
+  gimple *stmt = write->stmt;
+
+  /* Verify that we have an assignment.  */
+  if (gimple_code (stmt) != GIMPLE_ASSIGN)
+    {
+      log ("Write stmt not assignment -> skipping.\n");
+      return NULL_TREE;
+    }
+
+  /* Check if write's LHS (vnode) is not volatile.  */
+  tree lhs = gimple_assign_lhs (stmt);
+  if (TREE_THIS_VOLATILE (TREE_TYPE (lhs)))
+    {
+      log ("Variable volatile -> skipping.\n");
+      return NULL_TREE;
+    }
+
+  /* Check if RHS of write stmt is constant.  */
+  if (!gimple_assign_is_single_const (stmt))
+    {
+      log ("Found non-const operands.\n");
+      return NULL_TREE;
+    }
+
+  /* Extract the constant.  */
+  tree write_val = gimple_op (stmt, 1);
+
+  if (dump_file)
+    {
+      log ("Const op:\n");
+      dump_node (write_val, TDF_DETAILS, dump_file);
+    }
+
+  return write_val;
+}
+
+/* Try to propagate the single constant write of VNODE to all of
+   its reads.  */
+static void
+ipa_initcall_cp_execute_for_var (varpool_node *vnode,
+				 hash_set<cgraph_node *> *update_functions)
+{
+  ipa_ref_vec writes = vNULL;
+  ipa_ref_vec reads = vNULL;
+  struct ipa_ref *write;
+  tree write_val;
+  gimple_stmt_iterator gsi;
+  bool remove_permanently;
+
+  log ("%s for variable '%s'.\n", __func__, vnode->name ());
+
+  if (dump_file)
+    {
+      dump_node (vnode->decl, TDF_DETAILS, dump_file);
+    }
+
+  /* Variable must not be externally visible.  */
+  if (vnode->externally_visible_p ())
+    {
+      log ("\texternally visible -> skipping\n");
+      return;
+    }
+
+  /* All refs must be explicit.  */
+  if (!vnode->all_refs_explicit_p ())
+    {
+      log ("Not explicit variable refs -> skipping.\n");
+      return;
+    }
+
+  /* Check if any ref->use is IPA_REF_ALIAS.  */
+  if (vnode->has_aliases_p ())
+    {
+      log ("Found IPA_REF_ALIAS -> skipping.\n");
+      return;
+    }
+
+  /* Check if any ref->use is IPA_REF_ADDR.  */
+  if (vnode->alias || vnode->address_matters_p ())
+    {
+      log ("Found IPA_REF_ADDR -> skipping.\n");
+      return;
+    }
+
+  /* We don't touch arrays.  */
+  if (TREE_CODE (TREE_TYPE (vnode->decl)) == ARRAY_TYPE)
+    {
+      log ("Variable is array -> skipping.\n");
+      return;
+    }
+
+  /* We don't touch structs.  */
+  if (TREE_CODE (TREE_TYPE (vnode->decl)) == RECORD_TYPE)
+    {
+      log ("Variable is struct -> skipping.\n");
+      return;
+    }
+
+  /* We don't touch unions.  */
+  if (TREE_CODE (TREE_TYPE (vnode->decl)) == UNION_TYPE)
+    {
+      log ("Variable is union -> skipping.\n");
+      return;
+    }
+
+  /* Get all refs (must be REF_STORE or REF_LOAD).  */
+  bool continue_work
+    = ipa_initcall_get_writes_and_reads (vnode, &writes, &reads);
+  if (!continue_work)
+    {
+      log ("Static initialization -> skipping.\n");
+      writes.release ();
+      reads.release ();
+      return;
+    }
+
+  if (writes.length () > 1)
+    {
+      /* More than one writer.  */
+      log ("More than one writer -> skipping.\n");
+      writes.release ();
+      reads.release ();
+      return;
+    }
+  else if (writes.length () < 1)
+    {
+      /* No writer.  */
+      log ("No writer -> skipping.\n");
+      writes.release ();
+      reads.release ();
+      return;
+    }
+
+  /*
+   * Note:
+   * Limiting ourselves to a single write is not necessary in general,
+   * but it is good enough for the typical init () case, and it makes
+   * the following code much simpler.
+   */
+
+  /* Get (only) write ref.  */
+  write = writes.pop ();
+
+  /* Check if write's RHS is a constant and get it.  */
+  write_val = extract_constant_from_initcall_write (write);
+  if (write_val == NULL_TREE)
+    {
+      log ("Write's RHS is not constant -> skipping.\n");
+      writes.release ();
+      reads.release ();
+      return;
+    }
+
+  /* Assure all reads are after the write.  */
+  if (!write_before_all_reads (write, reads))
+    {
+      log ("Write not guaranteed to be before read -> skipping.\n");
+      writes.release ();
+      reads.release ();
+      return;
+    }
+
+  /* Propagate constant to reads.  */
+  if (!flag_initcall_cp_dryrun)
+    propagate_constant_to_reads (write_val, reads, update_functions);
+
+  /* Finally remove the write.  XXX: removal is disabled because
+     gsi_remove with remove_permanently == true currently fails; the
+     dead store is left for later passes to clean up.  */
+  gsi = gsi_for_stmt (write->stmt);
+  remove_permanently = false;
+  // gsi_remove (&gsi, remove_permanently);
+
+  log ("Eliminated variable '%s'.\n\n", vnode->name ());
+
+  writes.release ();
+  reads.release ();
+}
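+
+/* End-to-end, for the init ()/use () sketch above: 'limit' passes the
+   visibility, alias and type checks, init () provides the single
+   constant write, and the reads are rewritten in clones which
+   update_callgraph () below wires up.  With only the dry-run flag set,
+   the analysis and logging run but nothing is transformed.  */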
+
+/* Replace functions with their clones.  */
+bool
+update_callgraph (cgraph_node *const &r_cnode, cgraph_node **clone_ptr,
+		  void *)
+{
+  vec<cgraph_edge *> callers = r_cnode->collect_callers ();
+  cgraph_node *clone = *clone_ptr;
+  cgraph_edge *e;
+  int i;
+
+  FOR_EACH_VEC_ELT (callers, i, e)
+    e->redirect_callee (clone);
+  callers.release ();
+
+  for (e = clone->callers; e; e = e->next_caller)
+    {
+      e->caller->get_untransformed_body ();
+      function *inner_function = DECL_STRUCT_FUNCTION (e->caller->decl);
+      gimple_call_set_fndecl (e->call_stmt, clone->decl);
+      maybe_clean_eh_stmt_fn (inner_function, e->call_stmt);
+    }
+
+  r_cnode->skip_ipa_cp = true;
+  push_cfun (DECL_STRUCT_FUNCTION (r_cnode->decl));
+
+  log ("dumping function %s\n", r_cnode->dump_asm_name ());
+  function *func = DECL_STRUCT_FUNCTION (r_cnode->decl);
+  basic_block bb;
+  FOR_EACH_BB_FN (bb, func)
+    {
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+	   gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  if (dump_file)
+	    print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
+	}
+    }
+  if (dom_info_available_p (CDI_DOMINATORS))
+    free_dominance_info (CDI_DOMINATORS);
+  pop_cfun ();
+  return true;
+}
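+
+/* After the traversal each function recorded in func_to_clone keeps
+   its original body, its former callers invoke the clone, and
+   skip_ipa_cp keeps ipa-cp from re-analyzing the original.  */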
+
+static unsigned int
+ipa_initcall_cp_execute (void)
+{
+  varpool_node *vnode;
+
+  clone_num_suffixes1 = new hash_map<const char *, unsigned>;
+  hash_set<cgraph_node *> update_functions;
+  func_to_clone = new hash_map<cgraph_node *, cgraph_node *>;
+  FOR_EACH_VARIABLE (vnode)
+    {
+      ipa_initcall_cp_execute_for_var (vnode, &update_functions);
+    }
+
+  if (!flag_initcall_cp_dryrun)
+    func_to_clone->traverse<void *, update_callgraph> (NULL);
+
+  delete clone_num_suffixes1;
+  delete func_to_clone;
+
+  return 0;
+}
+
+namespace {
+
+const pass_data pass_data_ipa_initcall_cp = {
+  SIMPLE_IPA_PASS,	 /* type */
+  "initcall-cp",	 /* name */
+  OPTGROUP_NONE,	 /* optinfo_flags */
+  TV_NONE,		 /* tv_id */
+  (PROP_cfg | PROP_ssa), /* properties_required */
+  0,			 /* properties_provided */
+  0,			 /* properties_destroyed */
+  0,			 /* todo_flags_start */
+  (TODO_update_ssa | TODO_cleanup_cfg | TODO_dump_symtab
+   | TODO_rebuild_cgraph_edges
+   | TODO_discard_function),	 /* todo_flags_finish */
+};
+
+class pass_ipa_initcall_cp : public ipa_opt_pass_d
+{
+public:
+  pass_ipa_initcall_cp (gcc::context *ctxt)
+    : ipa_opt_pass_d (pass_data_ipa_initcall_cp, ctxt, NULL, NULL, NULL, NULL,
+		      NULL, NULL, 0, NULL, NULL)
+  {}
+
+  /* opt_pass methods: */
+  virtual bool gate (function *)
+  {
+    return in_lto_p && (flag_initcall_cp || flag_initcall_cp_dryrun);
+  }
+
+  virtual unsigned int execute (function *)
+  {
+    return ipa_initcall_cp_execute ();
+  }
+
+}; // class pass_ipa_initcall_cp
+
+} // namespace
+
+ipa_opt_pass_d *
+make_pass_ipa_initcall_cp (gcc::context *ctxt)
+{
+  return new pass_ipa_initcall_cp (ctxt);
+}
diff --git a/gcc/passes.def b/gcc/passes.def
index 2bf2cb78fc5..62609951bac 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -149,6 +149,7 @@  along with GCC; see the file COPYING3.  If not see
    NEXT_PASS (pass_ipa_profile);
    NEXT_PASS (pass_ipa_icf);
    NEXT_PASS (pass_ipa_devirt);
+  NEXT_PASS (pass_ipa_initcall_cp);
    NEXT_PASS (pass_ipa_cp);
    NEXT_PASS (pass_ipa_sra);
    NEXT_PASS (pass_ipa_cdtor_merge);
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index a1207a20a3c..65e09c657b7 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -501,6 +501,7 @@  extern ipa_opt_pass_d *make_pass_ipa_fn_summary (gcc::context *ctxt);
  extern ipa_opt_pass_d *make_pass_ipa_inline (gcc::context *ctxt);
  extern simple_ipa_opt_pass *make_pass_ipa_free_lang_data (gcc::context *ctxt);
  extern simple_ipa_opt_pass *make_pass_ipa_free_fn_summary (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_initcall_cp (gcc::context *ctxt);