
19/n: trans-mem: compiler tree/gimple stuff

Message ID 4EB2EACC.8050307@redhat.com
State New

Commit Message

Aldy Hernandez Nov. 3, 2011, 7:26 p.m. UTC
These are misc tree and gimple patches, which I consider front-ish-end 
changes.
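
For reference, a minimal user-level example of the construct these nodes
represent (illustrative only, not part of the patch; assumes -fgnu-tm):

  int counter;

  void
  bump (int by)
  {
    __transaction_atomic
      {
        counter += by;
        /* __transaction_cancel is what sets GTMA_HAVE_ABORT on the
           enclosing GIMPLE_TRANSACTION.  */
        if (counter < 0)
          __transaction_cancel;
      }
  }

__transaction_relaxed and the [[outer]] attribute map to the GTMA_IS_RELAXED
and GTMA_IS_OUTER subcode bits defined below.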

@@ -487,6 +488,15 @@ struct GTY(()) gimple_statement_eh_filte
    gimple_seq failure;
  };

+/* GIMPLE_EH_ELSE */
+
+struct GTY(()) gimple_statement_eh_else {
+  /* [ WORD 1-4 ]  */
+  struct gimple_statement_base gsbase;
+
+  /* [ WORD 5,6 ] */
+  gimple_seq n_body, e_body;
+};

  /* GIMPLE_EH_MUST_NOT_THROW */

@@ -757,6 +767,43 @@ struct GTY(()) gimple_statement_omp_atom
    tree val;
  };

+/* GIMPLE_TRANSACTION.  */
+
+/* Bits to be stored in the GIMPLE_TRANSACTION subcode.  */
+
+/* The __transaction_atomic was declared [[outer]] or it is
+   __transaction_relaxed.  */
+#define GTMA_IS_OUTER			(1u << 0)
+#define GTMA_IS_RELAXED			(1u << 1)
+#define GTMA_DECLARATION_MASK		(GTMA_IS_OUTER | GTMA_IS_RELAXED)
+
+/* The transaction is seen to have an abort.  */
+#define GTMA_HAVE_ABORT			(1u << 2)
+/* The transaction is seen to have loads or stores.  */
+#define GTMA_HAVE_LOAD			(1u << 3)
+#define GTMA_HAVE_STORE			(1u << 4)
+/* The transaction MAY enter serial irrevocable mode in its dynamic scope.  */
+#define GTMA_MAY_ENTER_IRREVOCABLE	(1u << 5)
+/* The transaction WILL enter serial irrevocable mode.
+   An irrevocable block post-dominates the entire transaction, such
+   that all invocations of the transaction will go serial-irrevocable.
+   In such case, we don't bother instrumenting the transaction, and
+   tell the runtime that it should begin the transaction in
+   serial-irrevocable mode.  */
+#define GTMA_DOES_GO_IRREVOCABLE	(1u << 6)
+
+struct GTY(()) gimple_statement_transaction
+{
+  /* [ WORD 1-10 ]  */
+  struct gimple_statement_with_memory_ops_base gsbase;
+
+  /* [ WORD 11 ] */
+  gimple_seq body;
+
+  /* [ WORD 12 ] */
+  tree label;
+};
+
  #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP)	SYM,
  enum gimple_statement_structure_enum {
  #include "gsstruct.def"
@@ -779,6 +826,7 @@ union GTY ((desc ("gimple_statement_stru
    struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
  struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
  struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
+  struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else;
  struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
  struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
    struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
@@ -793,6 +841,7 @@ union GTY ((desc ("gimple_statement_stru
  struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
  struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
  struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
+  struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction;
  };

  /* In gimple.c.  */
@@ -846,6 +895,7 @@ gimple gimple_build_asm_vec (const char
  gimple gimple_build_catch (tree, gimple_seq);
  gimple gimple_build_eh_filter (tree, gimple_seq);
  gimple gimple_build_eh_must_not_throw (tree);
+gimple gimple_build_eh_else (gimple_seq, gimple_seq);
  gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
  gimple gimple_build_wce (gimple_seq);
  gimple gimple_build_resx (int);
@@ -868,6 +918,7 @@ gimple gimple_build_omp_single (gimple_s
  gimple gimple_build_cdt (tree, tree);
  gimple gimple_build_omp_atomic_load (tree, tree);
  gimple gimple_build_omp_atomic_store (tree);
+gimple gimple_build_transaction (gimple_seq, tree);
  gimple gimple_build_predict (enum br_predictor, enum prediction);
  enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
  void sort_case_labels (VEC(tree,heap) *);
@@ -986,6 +1037,7 @@ extern bool walk_stmt_load_store_ops (gi
  				      bool (*)(gimple, tree, void *),
  				      bool (*)(gimple, tree, void *));
  extern bool gimple_ior_addresses_taken (bitmap, gimple);
+extern const_tree strip_invariant_refs (const_tree);
  extern bool gimple_call_builtin_p (gimple, enum built_in_function);
  extern bool gimple_asm_clobbers_memory_p (const_gimple);

@@ -1077,6 +1129,9 @@ extern tree canonicalize_cond_expr_cond
  /* In omp-low.c.  */
  extern tree omp_reduction_init (tree, tree);

+/* In trans-mem.c.  */
+extern void diagnose_tm_safe_errors (tree);
+
  /* In tree-nested.c.  */
  extern void lower_nested_functions (tree);
  extern void insert_field_into_struct (tree, tree);
@@ -1135,6 +1190,7 @@ gimple_has_substatements (gimple g)
      case GIMPLE_BIND:
      case GIMPLE_CATCH:
      case GIMPLE_EH_FILTER:
+    case GIMPLE_EH_ELSE:
      case GIMPLE_TRY:
      case GIMPLE_OMP_FOR:
      case GIMPLE_OMP_MASTER:
@@ -1146,6 +1202,7 @@ gimple_has_substatements (gimple g)
      case GIMPLE_OMP_SINGLE:
      case GIMPLE_OMP_CRITICAL:
      case GIMPLE_WITH_CLEANUP_EXPR:
+    case GIMPLE_TRANSACTION:
        return true;

      default:
@@ -2436,6 +2493,22 @@ gimple_call_alloca_for_var_p (gimple s)
    return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
  }

+/* Return true if S is a noinline call.  */
+
+static inline bool
+gimple_call_noinline_p (gimple s)
+{
+  GIMPLE_CHECK (s, GIMPLE_CALL);
+  return (s->gsbase.subcode & GF_CALL_NOINLINE) != 0;
+}
+
+static inline void
+gimple_call_set_noinline_p (gimple s)
+{
+  GIMPLE_CHECK (s, GIMPLE_CALL);
+  s->gsbase.subcode |= GF_CALL_NOINLINE;
+}
+
  /* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */

  static inline void
@@ -3178,6 +3251,35 @@ gimple_eh_must_not_throw_set_fndecl (gim
    gs->gimple_eh_mnt.fndecl = decl;
  }

+/* GIMPLE_EH_ELSE accessors.  */
+
+static inline gimple_seq
+gimple_eh_else_n_body (gimple gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+  return gs->gimple_eh_else.n_body;
+}
+
+static inline gimple_seq
+gimple_eh_else_e_body (gimple gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+  return gs->gimple_eh_else.e_body;
+}
+
+static inline void
+gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
+{
+  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+  gs->gimple_eh_else.n_body = seq;
+}
+
+static inline void
+gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
+{
+  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+  gs->gimple_eh_else.e_body = seq;
+}

  /* GIMPLE_TRY accessors. */

@@ -4556,6 +4658,67 @@ gimple_omp_continue_set_control_use (gim
    g->gimple_omp_continue.control_use = use;
  }

+/* Return the body for the GIMPLE_TRANSACTION statement GS.  */
+
+static inline gimple_seq
+gimple_transaction_body (gimple gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  return gs->gimple_transaction.body;
+}
+
+/* Return the label associated with a GIMPLE_TRANSACTION.  */
+
+static inline tree
+gimple_transaction_label (const_gimple gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  return gs->gimple_transaction.label;
+}
+
+static inline tree *
+gimple_transaction_label_ptr (gimple gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  return &gs->gimple_transaction.label;
+}
+
+/* Return the subcode associated with a GIMPLE_TRANSACTION.  */
+
+static inline unsigned int
+gimple_transaction_subcode (const_gimple gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  return gs->gsbase.subcode;
+}
+
+/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */
+
+static inline void
+gimple_transaction_set_body (gimple gs, gimple_seq body)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  gs->gimple_transaction.body = body;
+}
+
+/* Set the label associated with a GIMPLE_TRANSACTION.  */
+
+static inline void
+gimple_transaction_set_label (gimple gs, tree label)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  gs->gimple_transaction.label = label;
+}
+
+/* Set the subcode associated with a GIMPLE_TRANSACTION.  */
+
+static inline void
+gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
+{
+  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+  gs->gsbase.subcode = subcode;
+}
+

  /* Return a pointer to the return value for GIMPLE_RETURN GS.  */

@@ -4982,6 +5145,12 @@ struct walk_stmt_info
       will be visited more than once.  */
    struct pointer_set_t *pset;

+  /* Operand returned by the callbacks.  This is set when calling
+     walk_gimple_seq.  If the walk_stmt_fn or walk_tree_fn callback
+     returns non-NULL, this field will contain the tree returned by
+     the last callback.  */
+  tree callback_result;
+
    /* Indicates whether the operand being examined may be replaced
       with something that matches is_gimple_val (if true) or something
       slightly more complicated (if false).  "Something" technically
@@ -4994,23 +5163,20 @@ struct walk_stmt_info
       statement 'foo (&var)', the flag VAL_ONLY will initially be set
       to true, however, when walking &var, the operand of that
       ADDR_EXPR does not need to be a GIMPLE value.  */
-  bool val_only;
+  BOOL_BITFIELD val_only : 1;

    /* True if we are currently walking the LHS of an assignment.  */
-  bool is_lhs;
+  BOOL_BITFIELD is_lhs : 1;

    /* Optional.  Set to true by the callback functions if they made any
       changes.  */
-  bool changed;
+  BOOL_BITFIELD changed : 1;

    /* True if we're interested in location information.  */
-  bool want_locations;
+  BOOL_BITFIELD want_locations : 1;

-  /* Operand returned by the callbacks.  This is set when calling
-     walk_gimple_seq.  If the walk_stmt_fn or walk_tree_fn callback
-     returns non-NULL, this field will contain the tree returned by
-     the last callback.  */
-  tree callback_result;
+  /* True if we've removed the statement that was processed.  */
+  BOOL_BITFIELD removed_stmt : 1;
  };

  /* Callback for walk_gimple_stmt.  Called for every statement found
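
As a minimal sketch of how the new builders and accessors compose (the helper
name is hypothetical; only the declarations added above are assumed):

  static gimple
  build_relaxed_transaction (gimple_seq body, tree over_label)
  {
    /* Create the statement and record that it came from
       __transaction_relaxed.  */
    gimple g = gimple_build_transaction (body, over_label);
    gimple_transaction_set_subcode (g, GTMA_IS_RELAXED);
    return g;
  }

Later passes can then test the recorded properties, e.g.
"if (gimple_transaction_subcode (g) & GTMA_MAY_ENTER_IRREVOCABLE)".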

Comments

Richard Biener Nov. 4, 2011, 10:36 a.m. UTC | #1
On Thu, Nov 3, 2011 at 8:26 PM, Aldy Hernandez <aldyh@redhat.com> wrote:
> These are misc tree and gimple patches, which I consider front-ish-end
> changes.
>
> Index: gcc/tree.c
> ===================================================================
> --- gcc/tree.c  (.../trunk)     (revision 180744)
> +++ gcc/tree.c  (.../branches/transactional-memory)     (revision 180773)
> @@ -9594,6 +9594,9 @@ build_common_builtin_nodes (void)
>                                    integer_type_node, NULL_TREE);
>   local_define_builtin ("__builtin_eh_pointer", ftype, BUILT_IN_EH_POINTER,
>                        "__builtin_eh_pointer", ECF_PURE | ECF_NOTHROW |
> ECF_LEAF);
> +  if (flag_tm)
> +    apply_tm_attr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
> +                  get_identifier ("transaction_pure"));

I think this should use a new ECF_TM_PURE flag, unconditionally set
with handling in the functions that handle/return ECF flags so that
transitioning this to a tree node flag instead of an attribute is easier.
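
A hypothetical sketch of that suggestion (ECF_TM_PURE and its bit value are
assumptions, not part of this patch):

  /* In tree.h, next to the other ECF_* flags.  */
  #define ECF_TM_PURE               (1 << 12)

  /* In flags_from_decl_or_type, set unconditionally from the attribute so
     callers only ever test the ECF flag.  */
  if (lookup_attribute ("transaction_pure",
                        TYPE_ATTRIBUTES (TREE_TYPE (exp))))
    flags |= ECF_TM_PURE;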

>   tmp = lang_hooks.types.type_for_mode (targetm.eh_return_filter_mode (),
> 0);
>   ftype = build_function_type_list (tmp, integer_type_node, NULL_TREE);
> Index: gcc/tree.h
> ===================================================================
> --- gcc/tree.h  (.../trunk)     (revision 180744)
> +++ gcc/tree.h  (.../branches/transactional-memory)     (revision 180773)
> @@ -539,6 +539,9 @@ struct GTY(()) tree_common {
>        ENUM_IS_SCOPED in
>           ENUMERAL_TYPE
>
> +       TRANSACTION_EXPR_OUTER in
> +           TRANSACTION_EXPR
> +
>    public_flag:
>
>        TREE_OVERFLOW in
> @@ -566,6 +569,9 @@ struct GTY(()) tree_common {
>        OMP_CLAUSE_PRIVATE_DEBUG in
>            OMP_CLAUSE_PRIVATE
>
> +       TRANSACTION_EXPR_RELAXED in
> +           TRANSACTION_EXPR
> +
>    private_flag:
>
>        TREE_PRIVATE in
> @@ -1808,6 +1814,14 @@ extern void protected_set_expr_location
>  #define CALL_EXPR_ARGP(NODE) \
>   (&(TREE_OPERAND (CALL_EXPR_CHECK (NODE), 0)) + 3)
>
> +/* TM directives and accessors.  */
> +#define TRANSACTION_EXPR_BODY(NODE) \
> +  TREE_OPERAND (TRANSACTION_EXPR_CHECK (NODE), 0)
> +#define TRANSACTION_EXPR_OUTER(NODE) \
> +  (TRANSACTION_EXPR_CHECK (NODE)->base.static_flag)
> +#define TRANSACTION_EXPR_RELAXED(NODE) \
> +  (TRANSACTION_EXPR_CHECK (NODE)->base.public_flag)
> +
>  /* OpenMP directive and clause accessors.  */
>
>  #define OMP_BODY(NODE) \
> @@ -3452,6 +3466,34 @@ struct GTY(())
>  #define DECL_NO_INLINE_WARNING_P(NODE) \
>   (FUNCTION_DECL_CHECK (NODE)->function_decl.no_inline_warning_flag)
>
> +/* Nonzero in a FUNCTION_DECL means this function is the transactional
> +   clone of a function - called only from inside transactions.  */
> +#define DECL_IS_TM_CLONE(NODE) \
> +  (FUNCTION_DECL_CHECK (NODE)->function_decl.tm_clone_flag)

Why is it necessary to know whether a clone is a tm clone?

> +/* Nonzero if a FUNCTION_CODE is a TM load/store.  */
> +#define BUILTIN_TM_LOAD_STORE_P(FN) \
> +  ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
> +
> +/* Nonzero if a FUNCTION_CODE is a TM load.  */
> +#define BUILTIN_TM_LOAD_P(FN) \
> +  ((FN) >= BUILT_IN_TM_LOAD_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
> +
> +/* Nonzero if a FUNCTION_CODE is a TM store.  */
> +#define BUILTIN_TM_STORE_P(FN) \
> +  ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_STORE_WAW_LDOUBLE)
> +
> +#define CASE_BUILT_IN_TM_LOAD(FN)      \
> +  case BUILT_IN_TM_LOAD_##FN:          \
> +  case BUILT_IN_TM_LOAD_RAR_##FN:      \
> +  case BUILT_IN_TM_LOAD_RAW_##FN:      \
> +  case BUILT_IN_TM_LOAD_RFW_##FN
> +
> +#define CASE_BUILT_IN_TM_STORE(FN)     \
> +  case BUILT_IN_TM_STORE_##FN:         \
> +  case BUILT_IN_TM_STORE_WAR_##FN:     \
> +  case BUILT_IN_TM_STORE_WAW_##FN
> +
>  /* Nonzero in a FUNCTION_DECL that should be always inlined by the inliner
>    disregarding size and cost heuristics.  This is equivalent to using
>    the always_inline attribute without the required diagnostics if the
> @@ -3539,8 +3581,9 @@ struct GTY(()) tree_function_decl {
>   unsigned pure_flag : 1;
>   unsigned looping_const_or_pure_flag : 1;
>   unsigned has_debug_args_flag : 1;
> +  unsigned tm_clone_flag : 1;
>
> -  /* 2 bits left */
> +  /* 1 bit left */
>  };
>
>  /* The source language of the translation-unit.  */
> @@ -5174,6 +5217,8 @@ extern void expand_return (tree);
>
>  /* In tree-eh.c */
>  extern void using_eh_for_cleanups (void);
> +extern int struct_ptr_eq (const void *, const void *);
> +extern hashval_t struct_ptr_hash (const void *);
>
>  /* In fold-const.c */
>
> @@ -5543,6 +5588,8 @@ extern tree build_duplicate_type (tree);
>  #define ECF_NOVOPS               (1 << 9)
>  /* The function does not lead to calls within current function unit.  */
>  #define ECF_LEAF                 (1 << 10)
> +/* Nonzero if this call performs a transactional memory operation.  */
> +#define ECF_TM_OPS               (1 << 11)

What's this flag useful for?  Isn't it the case that you want to conservatively
know whether a call might perform a tm operation?  Thus, the flag
should be inverted?  Is this the same as "TM pure"?

>  extern int flags_from_decl_or_type (const_tree);
>  extern int call_expr_flags (const_tree);
> @@ -5593,6 +5640,8 @@ extern void init_attributes (void);
>    a decl attribute to the declaration rather than to its type).  */
>  extern tree decl_attributes (tree *, tree, int);
>
> +extern void apply_tm_attr (tree, tree);
> +
>  /* In integrate.c */
>  extern void set_decl_abstract_flags (tree, int);
>  extern void set_decl_origin_self (tree);
> @@ -5805,6 +5854,21 @@ extern unsigned HOST_WIDE_INT compute_bu
>  extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree);
>  extern tree build_personality_function (const char *);
>
> +/* In trans-mem.c.  */
> +extern tree build_tm_abort_call (location_t, bool);
> +extern bool is_tm_safe (const_tree);
> +extern bool is_tm_pure (const_tree);
> +extern bool is_tm_may_cancel_outer (tree);
> +extern bool is_tm_ending_fndecl (tree);
> +extern void record_tm_replacement (tree, tree);
> +extern void tm_malloc_replacement (tree);
> +
> +static inline bool
> +is_tm_safe_or_pure (tree x)

const_tree

> +{
> +  return is_tm_safe (x) || is_tm_pure (x);
> +}
> +
>  /* In tree-inline.c.  */
>
>  void init_inline_once (void);
> Index: gcc/attribs.c
> ===================================================================
> --- gcc/attribs.c       (.../trunk)     (revision 180744)
> +++ gcc/attribs.c       (.../branches/transactional-memory)     (revision
> 180773)
> @@ -166,7 +166,8 @@ init_attributes (void)
>          gcc_assert (strcmp (attribute_tables[i][j].name,
>                              attribute_tables[i][k].name));
>     }
> -  /* Check that no name occurs in more than one table.  */
> +  /* Check that no name occurs in more than one table.  Names that
> +     begin with '*' are exempt, and may be overridden.  */
>   for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
>     {
>       size_t j, k, l;
> @@ -174,8 +175,9 @@ init_attributes (void)
>       for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
>        for (k = 0; attribute_tables[i][k].name != NULL; k++)
>          for (l = 0; attribute_tables[j][l].name != NULL; l++)
> -           gcc_assert (strcmp (attribute_tables[i][k].name,
> -                               attribute_tables[j][l].name));
> +           gcc_assert (attribute_tables[i][k].name[0] == '*'
> +                       || strcmp (attribute_tables[i][k].name,
> +                                  attribute_tables[j][l].name));
>     }
>  #endif
>
> @@ -207,7 +209,7 @@ register_attribute (const struct attribu
>   slot = htab_find_slot_with_hash (attribute_hash, &str,
>                                   substring_hash (str.str, str.length),
>                                   INSERT);
> -  gcc_assert (!*slot);
> +  gcc_assert (!*slot || attr->name[0] == '*');
>   *slot = (void *) CONST_CAST (struct attribute_spec *, attr);
>  }

The above changes seem to belong to a different changeset and look
strange.  Why would attributes ever appear in two different tables?

> @@ -484,3 +486,12 @@ decl_attributes (tree *node, tree attrib
>
>   return returned_attrs;
>  }
> +
> +/* Subroutine of set_method_tm_attributes.  Apply TM attribute ATTR
> +   to the method FNDECL.  */
> +
> +void
> +apply_tm_attr (tree fndecl, tree attr)
> +{
> +  decl_attributes (&TREE_TYPE (fndecl), tree_cons (attr, NULL, NULL), 0);
> +}
> Index: gcc/targhooks.c
> ===================================================================
> --- gcc/targhooks.c     (.../trunk)     (revision 180744)
> +++ gcc/targhooks.c     (.../branches/transactional-memory)     (revision
> 180773)
> @@ -1214,6 +1214,12 @@ default_have_conditional_execution (void
>  #endif
>  }
>
> +tree
> +default_builtin_tm_load_store (tree ARG_UNUSED (type))
> +{
> +  return NULL_TREE;
> +}
> +
>  /* Compute cost of moving registers to/from memory.  */
>
>  int
> Index: gcc/targhooks.h
> ===================================================================
> --- gcc/targhooks.h     (.../trunk)     (revision 180744)
> +++ gcc/targhooks.h     (.../branches/transactional-memory)     (revision
> 180773)
> @@ -152,6 +152,9 @@ extern bool default_addr_space_subset_p
>  extern rtx default_addr_space_convert (rtx, tree, tree);
>  extern unsigned int default_case_values_threshold (void);
>  extern bool default_have_conditional_execution (void);
> +
> +extern tree default_builtin_tm_load_store (tree);
> +
>  extern int default_memory_move_cost (enum machine_mode, reg_class_t, bool);
>  extern int default_register_move_cost (enum machine_mode, reg_class_t,
>                                       reg_class_t);
> Index: gcc/gimple.def
> ===================================================================
> --- gcc/gimple.def      (.../trunk)     (revision 180744)
> +++ gcc/gimple.def      (.../branches/transactional-memory)     (revision
> 180773)
> @@ -124,6 +124,14 @@ DEFGSCODE(GIMPLE_ASM, "gimple_asm", GSS_
>     CHAIN is the optional static chain link for nested functions.  */
>  DEFGSCODE(GIMPLE_CALL, "gimple_call", GSS_CALL)
>
> +/* GIMPLE_TRANSACTION <BODY, LABEL> represents __transaction_atomic and
> +   __transaction_relaxed blocks.
> +   BODY is the sequence of statements inside the transaction.
> +   LABEL is a label for the statement immediately following the
> +   transaction.  This is before RETURN so that it has MEM_OPS,
> +   so that it can clobber global memory.  */
> +DEFGSCODE(GIMPLE_TRANSACTION, "gimple_transaction", GSS_TRANSACTION)
> +
>  /* GIMPLE_RETURN <RETVAL> represents return statements.
>
>    RETVAL is the value to return or NULL.  If a value is returned it
> @@ -151,6 +159,12 @@ DEFGSCODE(GIMPLE_EH_FILTER, "gimple_eh_f
>    be invoked if an exception propagates to this point.  */
>  DEFGSCODE(GIMPLE_EH_MUST_NOT_THROW, "gimple_eh_must_not_throw", GSS_EH_MNT)
>
> +/* GIMPLE_EH_ELSE <N_BODY, E_BODY> must be the sole contents of
> +   a GIMPLE_TRY_FINALLY node.  For all normal exits from the try block,
> +   N_BODY is run; for all exception exits from the try block,
> +   E_BODY is run.  */
> +DEFGSCODE(GIMPLE_EH_ELSE, "gimple_eh_else", GSS_EH_ELSE)
> +
>  /* GIMPLE_RESX resumes execution after an exception.  */
>  DEFGSCODE(GIMPLE_RESX, "gimple_resx", GSS_EH_CTRL)
>
> Index: gcc/builtin-types.def
> ===================================================================
> --- gcc/builtin-types.def       (.../trunk)     (revision 180744)
> +++ gcc/builtin-types.def       (.../branches/transactional-memory)
> (revision 180773)
> @@ -477,3 +477,24 @@ DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_INT_I
>  DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR)
>  DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE,
>                     BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE)
> +
> +
> +DEF_FUNCTION_TYPE_1 (BT_FN_I1_VPTR, BT_I1, BT_VOLATILE_PTR)
> +DEF_FUNCTION_TYPE_1 (BT_FN_I2_VPTR, BT_I2, BT_VOLATILE_PTR)
> +DEF_FUNCTION_TYPE_1 (BT_FN_I4_VPTR, BT_I4, BT_VOLATILE_PTR)
> +DEF_FUNCTION_TYPE_1 (BT_FN_I8_VPTR, BT_I8, BT_VOLATILE_PTR)
> +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_VPTR, BT_FLOAT, BT_VOLATILE_PTR)
> +DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_VPTR, BT_DOUBLE, BT_VOLATILE_PTR)
> +DEF_FUNCTION_TYPE_1 (BT_FN_LDOUBLE_VPTR, BT_LONGDOUBLE, BT_VOLATILE_PTR)
> +
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I1, BT_VOID, BT_VOLATILE_PTR, BT_I1)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I2, BT_VOID, BT_VOLATILE_PTR, BT_I2)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I4, BT_VOID, BT_VOLATILE_PTR, BT_I4)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I8, BT_VOID, BT_VOLATILE_PTR, BT_I8)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_FLOAT, BT_VOID, BT_VOLATILE_PTR,
> BT_FLOAT)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_DOUBLE, BT_VOID,
> +                     BT_VOLATILE_PTR, BT_DOUBLE)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_LDOUBLE, BT_VOID,
> +                    BT_VOLATILE_PTR, BT_LONGDOUBLE)
> +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_SIZE, BT_VOID,
> +                    BT_VOLATILE_PTR, BT_SIZE)
> Index: gcc/builtins.def
> ===================================================================
> --- gcc/builtins.def    (.../trunk)     (revision 180744)
> +++ gcc/builtins.def    (.../branches/transactional-memory)     (revision
> 180773)
> @@ -142,6 +142,13 @@ along with GCC; see the file COPYING3.
>                false, true, true, ATTRS, false, \
>               (flag_openmp || flag_tree_parallelize_loops))
>
> +/* Builtin used by the implementation of GNU TM.  These
> +   functions are mapped to the actual implementation of the STM library. */
> +#undef DEF_TM_BUILTIN
> +#define DEF_TM_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
> +  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE,    \
> +               false, true, true, ATTRS, false, flag_tm)
> +
>  /* Define an attribute list for math functions that are normally
>    "impure" because some of them may write into global memory for
>    `errno'.  If !flag_errno_math they are instead "const".  */
> @@ -624,6 +631,7 @@ DEF_GCC_BUILTIN        (BUILT_IN_APPLY_A
>  DEF_GCC_BUILTIN        (BUILT_IN_BSWAP32, "bswap32", BT_FN_UINT32_UINT32,
> ATTR_CONST_NOTHROW_LEAF_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_BSWAP64, "bswap64", BT_FN_UINT64_UINT64,
> ATTR_CONST_NOTHROW_LEAF_LIST)
>  DEF_EXT_LIB_BUILTIN    (BUILT_IN_CLEAR_CACHE, "__clear_cache",
> BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
> +/* [trans-mem]: Adjust BUILT_IN_TM_CALLOC if BUILT_IN_CALLOC is changed.
>  */
>  DEF_LIB_BUILTIN        (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE,
> ATTR_MALLOC_NOTHROW_LEAF_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_CLASSIFY_TYPE, "classify_type",
> BT_FN_INT_VAR, ATTR_LEAF_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_CLZ, "clz", BT_FN_INT_UINT,
> ATTR_CONST_NOTHROW_LEAF_LIST)
> @@ -662,6 +670,7 @@ DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSL, "
>  DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG,
> ATTR_CONST_NOTHROW_LEAF_LIST)
>  DEF_EXT_LIB_BUILTIN        (BUILT_IN_FORK, "fork", BT_FN_PID,
> ATTR_NOTHROW_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_FRAME_ADDRESS, "frame_address",
> BT_FN_PTR_UINT, ATTR_NULL)
> +/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed.  */
>  DEF_LIB_BUILTIN        (BUILT_IN_FREE, "free", BT_FN_VOID_PTR,
> ATTR_NOTHROW_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr",
> BT_FN_PTR_PTR, ATTR_NULL)
>  DEF_EXT_LIB_BUILTIN    (BUILT_IN_GETTEXT, "gettext",
> BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1)
> @@ -698,6 +707,7 @@ DEF_GCC_BUILTIN        (BUILT_IN_ISUNORD
>  DEF_LIB_BUILTIN        (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG,
> ATTR_CONST_NOTHROW_LEAF_LIST)
>  DEF_C99_BUILTIN        (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG,
> ATTR_CONST_NOTHROW_LEAF_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT,
> ATTR_NORETURN_NOTHROW_LEAF_LIST)
> +/* [trans-mem]: Adjust BUILT_IN_TM_MALLOC if BUILT_IN_MALLOC is changed.
>  */
>  DEF_LIB_BUILTIN        (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE,
> ATTR_MALLOC_NOTHROW_LEAF_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR,
> ATTR_LEAF_LIST)
>  DEF_GCC_BUILTIN        (BUILT_IN_PARITY, "parity", BT_FN_INT_UINT,
> ATTR_CONST_NOTHROW_LEAF_LIST)
> @@ -793,3 +803,6 @@ DEF_BUILTIN_STUB (BUILT_IN_EH_COPY_VALUE
>
>  /* OpenMP builtins.  */
>  #include "omp-builtins.def"
> +
> +/* GTM builtins. */
> +#include "gtm-builtins.def"
> Index: gcc/gimple-low.c
> ===================================================================
> --- gcc/gimple-low.c    (.../trunk)     (revision 180744)
> +++ gcc/gimple-low.c    (.../branches/transactional-memory)     (revision
> 180773)
> @@ -396,6 +396,11 @@ lower_stmt (gimple_stmt_iterator *gsi, s
>       lower_sequence (gimple_eh_filter_failure (stmt), data);
>       break;
>
> +    case GIMPLE_EH_ELSE:
> +      lower_sequence (gimple_eh_else_n_body (stmt), data);
> +      lower_sequence (gimple_eh_else_e_body (stmt), data);
> +      break;
> +
>     case GIMPLE_NOP:
>     case GIMPLE_ASM:
>     case GIMPLE_ASSIGN:
> @@ -446,6 +451,10 @@ lower_stmt (gimple_stmt_iterator *gsi, s
>       data->cannot_fallthru = false;
>       return;
>
> +    case GIMPLE_TRANSACTION:
> +      lower_sequence (gimple_transaction_body (stmt), data);
> +      break;
> +
>     default:
>       gcc_unreachable ();
>     }
> @@ -727,6 +736,10 @@ gimple_stmt_may_fallthru (gimple stmt)
>       return (gimple_seq_may_fallthru (gimple_try_eval (stmt))
>              && gimple_seq_may_fallthru (gimple_try_cleanup (stmt)));
>
> +    case GIMPLE_EH_ELSE:
> +      return (gimple_seq_may_fallthru (gimple_eh_else_n_body (stmt))
> +             || gimple_seq_may_fallthru (gimple_eh_else_e_body (stmt)));
> +
>     case GIMPLE_CALL:
>       /* Functions that do not return do not fall through.  */
>       return (gimple_call_flags (stmt) & ECF_NORETURN) == 0;
> Index: gcc/gsstruct.def
> ===================================================================
> --- gcc/gsstruct.def    (.../trunk)     (revision 180744)
> +++ gcc/gsstruct.def    (.../branches/transactional-memory)     (revision
> 180773)
> @@ -38,6 +38,7 @@ DEFGSSTRUCT(GSS_CATCH, gimple_statement_
>  DEFGSSTRUCT(GSS_EH_FILTER, gimple_statement_eh_filter, false)
>  DEFGSSTRUCT(GSS_EH_MNT, gimple_statement_eh_mnt, false)
>  DEFGSSTRUCT(GSS_EH_CTRL, gimple_statement_eh_ctrl, false)
> +DEFGSSTRUCT(GSS_EH_ELSE, gimple_statement_eh_else, false)
>  DEFGSSTRUCT(GSS_WCE, gimple_statement_wce, false)
>  DEFGSSTRUCT(GSS_OMP, gimple_statement_omp, false)
>  DEFGSSTRUCT(GSS_OMP_CRITICAL, gimple_statement_omp_critical, false)
> @@ -49,3 +50,4 @@ DEFGSSTRUCT(GSS_OMP_SINGLE, gimple_state
>  DEFGSSTRUCT(GSS_OMP_CONTINUE, gimple_statement_omp_continue, false)
>  DEFGSSTRUCT(GSS_OMP_ATOMIC_LOAD, gimple_statement_omp_atomic_load, false)
>  DEFGSSTRUCT(GSS_OMP_ATOMIC_STORE, gimple_statement_omp_atomic_store, false)
> +DEFGSSTRUCT(GSS_TRANSACTION, gimple_statement_transaction, false)
> Index: gcc/tree-eh.c
> ===================================================================
> --- gcc/tree-eh.c       (.../trunk)     (revision 180744)
> +++ gcc/tree-eh.c       (.../branches/transactional-memory)     (revision
> 180773)
> @@ -58,7 +58,7 @@ using_eh_for_cleanups (void)
>    pointer.  Assumes all pointers are interchangeable, which is sort
>    of already assumed by gcc elsewhere IIRC.  */
>
> -static int
> +int
>  struct_ptr_eq (const void *a, const void *b)
>  {
>   const void * const * x = (const void * const *) a;
> @@ -66,7 +66,7 @@ struct_ptr_eq (const void *a, const void
>   return *x == *y;
>  }
>
> -static hashval_t
> +hashval_t
>  struct_ptr_hash (const void *a)
>  {
>   const void * const * x = (const void * const *) a;

Rather than exporting those here consider moving them to a common
header as inline functions.

  const void * const * x = (const void * const *) a;
  return (size_t)*x >> 4;

and on the way change that to (intptr_t)*x >> 4
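
One possible shape of that suggestion (the shared-header placement is an
assumption; the bodies are the existing tree-eh.c ones plus the cast change):

  /* Hash/equality callbacks for pointer-keyed hash tables.  */
  static inline int
  struct_ptr_eq (const void *a, const void *b)
  {
    const void * const *x = (const void * const *) a;
    const void * const *y = (const void * const *) b;
    return *x == *y;
  }

  static inline hashval_t
  struct_ptr_hash (const void *a)
  {
    const void * const *x = (const void * const *) a;
    return (intptr_t) *x >> 4;
  }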

> @@ -284,6 +284,11 @@ collect_finally_tree (gimple stmt, gimpl
>       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
>       break;
>
> +    case GIMPLE_EH_ELSE:
> +      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
> +      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
> +      break;
> +
>     default:
>       /* A type, a decl, or some kind of statement that we're not
>         interested in.  Don't walk them.  */
> @@ -534,6 +539,10 @@ replace_goto_queue_1 (gimple stmt, struc
>     case GIMPLE_EH_FILTER:
>       replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
>       break;
> +    case GIMPLE_EH_ELSE:
> +      replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
> +      replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
> +      break;
>
>     default:
>       /* These won't have gotos in them.  */
> @@ -921,6 +930,21 @@ lower_try_finally_fallthru_label (struct
>   return label;
>  }
>
> +/* A subroutine of lower_try_finally.  If FINALLY consists of a
> +   GIMPLE_EH_ELSE node, return it.  */
> +
> +static inline gimple
> +get_eh_else (gimple_seq finally)
> +{
> +  gimple x = gimple_seq_first_stmt (finally);
> +  if (gimple_code (x) == GIMPLE_EH_ELSE)
> +    {
> +      gcc_assert (gimple_seq_singleton_p (finally));
> +      return x;
> +    }
> +  return NULL;
> +}
> +
>  /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
>    langhook returns non-null, then the language requires that the exception
>    path out of a try_finally be treated specially.  To wit: the code within
> @@ -950,7 +974,7 @@ honor_protect_cleanup_actions (struct le
>   gimple_stmt_iterator gsi;
>   bool finally_may_fallthru;
>   gimple_seq finally;
> -  gimple x;
> +  gimple x, eh_else;
>
>   /* First check for nothing to do.  */
>   if (lang_hooks.eh_protect_cleanup_actions == NULL)
> @@ -960,12 +984,18 @@ honor_protect_cleanup_actions (struct le
>     return;
>
>   finally = gimple_try_cleanup (tf->top_p);
> -  finally_may_fallthru = gimple_seq_may_fallthru (finally);
> +  eh_else = get_eh_else (finally);
>
>   /* Duplicate the FINALLY block.  Only need to do this for try-finally,
> -     and not for cleanups.  */
> -  if (this_state)
> +     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
> +  if (eh_else)
> +    {
> +      finally = gimple_eh_else_e_body (eh_else);
> +      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
> +    }
> +  else if (this_state)
>     finally = lower_try_finally_dup_block (finally, outer_state);
> +  finally_may_fallthru = gimple_seq_may_fallthru (finally);
>
>   /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
>      set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
> @@ -1011,7 +1041,7 @@ lower_try_finally_nofallthru (struct leh
>                              struct leh_tf_state *tf)
>  {
>   tree lab;
> -  gimple x;
> +  gimple x, eh_else;
>   gimple_seq finally;
>   struct goto_queue_node *q, *qe;
>
> @@ -1034,15 +1064,35 @@ lower_try_finally_nofallthru (struct leh
>
>   replace_goto_queue (tf);
>
> -  lower_eh_constructs_1 (state, finally);
> -  gimple_seq_add_seq (&tf->top_p_seq, finally);
> +  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.
>  */
> +  eh_else = get_eh_else (finally);
> +  if (eh_else)
> +    {
> +      finally = gimple_eh_else_n_body (eh_else);
> +      lower_eh_constructs_1 (state, finally);
> +      gimple_seq_add_seq (&tf->top_p_seq, finally);
>
> -  if (tf->may_throw)
> +      if (tf->may_throw)
> +       {
> +         finally = gimple_eh_else_e_body (eh_else);
> +         lower_eh_constructs_1 (state, finally);
> +
> +         emit_post_landing_pad (&eh_seq, tf->region);
> +         gimple_seq_add_seq (&eh_seq, finally);
> +       }
> +    }
> +  else
>     {
> -      emit_post_landing_pad (&eh_seq, tf->region);
> +      lower_eh_constructs_1 (state, finally);
> +      gimple_seq_add_seq (&tf->top_p_seq, finally);
>
> -      x = gimple_build_goto (lab);
> -      gimple_seq_add_stmt (&eh_seq, x);
> +      if (tf->may_throw)
> +       {
> +         emit_post_landing_pad (&eh_seq, tf->region);
> +
> +         x = gimple_build_goto (lab);
> +         gimple_seq_add_stmt (&eh_seq, x);
> +       }
>     }
>  }
>
> @@ -1062,6 +1112,18 @@ lower_try_finally_onedest (struct leh_st
>   finally = gimple_try_cleanup (tf->top_p);
>   tf->top_p_seq = gimple_try_eval (tf->top_p);
>
> +  /* Since there's only one destination, and the destination edge can only
> +     either be EH or non-EH, that implies that all of our incoming edges
> +     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
> +  x = get_eh_else (finally);
> +  if (x)
> +    {
> +      if (tf->may_throw)
> +        finally = gimple_eh_else_e_body (x);
> +      else
> +        finally = gimple_eh_else_n_body (x);
> +    }
> +
>   lower_eh_constructs_1 (state, finally);
>
>   if (tf->may_throw)
> @@ -1132,11 +1194,18 @@ lower_try_finally_copy (struct leh_state
>   gimple_seq finally;
>   gimple_seq new_stmt;
>   gimple_seq seq;
> -  gimple x;
> +  gimple x, eh_else;
>   tree tmp;
>   location_t tf_loc = gimple_location (tf->try_finally_expr);
>
>   finally = gimple_try_cleanup (tf->top_p);
> +
> +  /* Notice EH_ELSE, and simplify some of the remaining code
> +     by considering FINALLY to be the normal return path only.  */
> +  eh_else = get_eh_else (finally);
> +  if (eh_else)
> +    finally = gimple_eh_else_n_body (eh_else);
> +
>   tf->top_p_seq = gimple_try_eval (tf->top_p);
>   new_stmt = NULL;
>
> @@ -1153,7 +1222,12 @@ lower_try_finally_copy (struct leh_state
>
>   if (tf->may_throw)
>     {
> -      seq = lower_try_finally_dup_block (finally, state);
> +      /* We don't need to copy the EH path of EH_ELSE,
> +        since it is only emitted once.  */
> +      if (eh_else)
> +        seq = gimple_eh_else_e_body (eh_else);
> +      else
> +        seq = lower_try_finally_dup_block (finally, state);
>       lower_eh_constructs_1 (state, seq);
>
>       emit_post_landing_pad (&eh_seq, tf->region);
> @@ -1252,7 +1326,7 @@ lower_try_finally_switch (struct leh_sta
>   tree last_case;
>   VEC (tree,heap) *case_label_vec;
>   gimple_seq switch_body;
> -  gimple x;
> +  gimple x, eh_else;
>   tree tmp;
>   gimple switch_stmt;
>   gimple_seq finally;
> @@ -1263,9 +1337,10 @@ lower_try_finally_switch (struct leh_sta
>   location_t finally_loc;
>
>   switch_body = gimple_seq_alloc ();
> +  finally = gimple_try_cleanup (tf->top_p);
> +  eh_else = get_eh_else (finally);
>
>   /* Mash the TRY block to the head of the chain.  */
> -  finally = gimple_try_cleanup (tf->top_p);
>   tf->top_p_seq = gimple_try_eval (tf->top_p);
>
>   /* The location of the finally is either the last stmt in the finally
> @@ -1281,7 +1356,7 @@ lower_try_finally_switch (struct leh_sta
>   nlabels = VEC_length (tree, tf->dest_array);
>   return_index = nlabels;
>   eh_index = return_index + tf->may_return;
> -  fallthru_index = eh_index + tf->may_throw;
> +  fallthru_index = eh_index + (tf->may_throw && !eh_else);
>   ndests = fallthru_index + tf->may_fallthru;
>
>   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
> @@ -1319,7 +1394,23 @@ lower_try_finally_switch (struct leh_sta
>       gimple_seq_add_stmt (&switch_body, x);
>     }
>
> -  if (tf->may_throw)
> +  /* For EH_ELSE, emit the exception path (plus resx) now, then
> +     subsequently we only need consider the normal path.  */
> +  if (eh_else)
> +    {
> +      if (tf->may_throw)
> +       {
> +         finally = gimple_eh_else_e_body (eh_else);
> +         lower_eh_constructs_1 (state, finally);
> +
> +         emit_post_landing_pad (&eh_seq, tf->region);
> +         gimple_seq_add_seq (&eh_seq, finally);
> +         emit_resx (&eh_seq, tf->region);
> +       }
> +
> +      finally = gimple_eh_else_n_body (eh_else);
> +    }
> +  else if (tf->may_throw)
>     {
>       emit_post_landing_pad (&eh_seq, tf->region);
>
> @@ -1452,12 +1543,22 @@ lower_try_finally_switch (struct leh_sta
>    the estimate of the size of the switch machinery we'd have to add.  */
>
>  static bool
> -decide_copy_try_finally (int ndests, gimple_seq finally)
> +decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
>  {
>   int f_estimate, sw_estimate;
> +  gimple eh_else;
> +
> +  /* If there's an EH_ELSE involved, the exception path is separate
> +     and really doesn't come into play for this computation.  */
> +  eh_else = get_eh_else (finally);
> +  if (eh_else)
> +    {
> +      ndests -= may_throw;
> +      finally = gimple_eh_else_n_body (eh_else);
> +    }
>
>   if (!optimize)
> -    return false;
> +    return ndests == 1;
>
>   /* Finally estimate N times, plus N gotos.  */
>   f_estimate = count_insns_seq (finally, &eni_size_weights);
> @@ -1563,7 +1664,8 @@ lower_try_finally (struct leh_state *sta
>   /* We can easily special-case redirection to a single destination.  */
>   else if (ndests == 1)
>     lower_try_finally_onedest (state, &this_tf);
> -  else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
> +  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
> +                                   gimple_try_cleanup (tp)))
>     lower_try_finally_copy (state, &this_tf);
>   else
>     lower_try_finally_switch (state, &this_tf);
> @@ -1928,6 +2030,9 @@ lower_eh_constructs_2 (struct leh_state
>                case GIMPLE_EH_MUST_NOT_THROW:
>                    replace = lower_eh_must_not_throw (state, stmt);
>                    break;
> +               case GIMPLE_EH_ELSE:
> +                   /* This code is only valid with GIMPLE_TRY_FINALLY.  */
> +                   gcc_unreachable ();
>                default:
>                    replace = lower_cleanup (state, stmt);
>                    break;
> @@ -1942,6 +2047,10 @@ lower_eh_constructs_2 (struct leh_state
>       /* Return since we don't want gsi_next () */
>       return;
>
> +    case GIMPLE_EH_ELSE:
> +      /* We should be eliminating this in lower_try_finally et al.  */
> +      gcc_unreachable ();
> +
>     default:
>       /* A type, a decl, or some kind of statement that we're not
>         interested in.  Don't walk them.  */
> @@ -2832,6 +2941,10 @@ refactor_eh_r (gimple_seq seq)
>          case GIMPLE_EH_FILTER:
>            refactor_eh_r (gimple_eh_filter_failure (one));
>            break;
> +         case GIMPLE_EH_ELSE:
> +           refactor_eh_r (gimple_eh_else_n_body (one));
> +           refactor_eh_r (gimple_eh_else_e_body (one));
> +           break;
>          default:
>            break;
>          }
> Index: gcc/gimple-pretty-print.c
> ===================================================================
> --- gcc/gimple-pretty-print.c   (.../trunk)     (revision 180744)
> +++ gcc/gimple-pretty-print.c   (.../branches/transactional-memory)
> (revision 180773)
> @@ -33,6 +33,7 @@ along with GCC; see the file COPYING3.
>  #include "tree-pass.h"
>  #include "gimple.h"
>  #include "value-prof.h"
> +#include "trans-mem.h"
>
>  #define INDENT(SPACE)                                                  \
>   do { int i; for (i = 0; i < SPACE; i++) pp_space (buffer); } while (0)
> @@ -162,6 +163,7 @@ debug_gimple_seq (gimple_seq seq)
>      'd' - outputs an int as a decimal,
>      's' - outputs a string,
>      'n' - outputs a newline,
> +     'x' - outputs an int as hexadecimal,
>      '+' - increases indent by 2 then outputs a newline,
>      '-' - decreases indent by 2 then outputs a newline.   */
>
> @@ -216,6 +218,10 @@ dump_gimple_fmt (pretty_printer *buffer,
>                 newline_and_indent (buffer, spc);
>                 break;
>
> +              case 'x':
> +                pp_scalar (buffer, "%x", va_arg (args, int));
> +                break;
> +
>               case '+':
>                 spc += 2;
>                 newline_and_indent (buffer, spc);
> @@ -622,6 +628,7 @@ static void
>  dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags)
>  {
>   tree lhs = gimple_call_lhs (gs);
> +  tree fn = gimple_call_fn (gs);
>
>   if (flags & TDF_ALIAS)
>     {
> @@ -648,8 +655,7 @@ dump_gimple_call (pretty_printer *buffer
>        dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T", gs,
>                         internal_fn_name (gimple_call_internal_fn (gs)),
> lhs);
>       else
> -       dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T",
> -                        gs, gimple_call_fn (gs), lhs);
> +       dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, fn, lhs);
>       if (gimple_call_num_args (gs) > 0)
>         {
>           pp_string (buffer, ", ");
> @@ -672,7 +678,7 @@ dump_gimple_call (pretty_printer *buffer
>       if (gimple_call_internal_p (gs))
>        pp_string (buffer, internal_fn_name (gimple_call_internal_fn (gs)));
>       else
> -       print_call_name (buffer, gimple_call_fn (gs), flags);
> +       print_call_name (buffer, fn, flags);
>       pp_string (buffer, " (");
>       dump_gimple_call_args (buffer, gs, flags);
>       pp_character (buffer, ')');
> @@ -689,9 +695,63 @@ dump_gimple_call (pretty_printer *buffer
>
>   if (gimple_call_return_slot_opt_p (gs))
>     pp_string (buffer, " [return slot optimization]");
> -
>   if (gimple_call_tail_p (gs))
>     pp_string (buffer, " [tail call]");
> +
> +  /* Dump the arguments of _ITM_beginTransaction sanely.  */
> +  if (TREE_CODE (fn) == ADDR_EXPR)
> +    fn = TREE_OPERAND (fn, 0);
> +  if (TREE_CODE (fn) == FUNCTION_DECL && DECL_IS_TM_CLONE (fn))
> +    pp_string (buffer, " [tm-clone]");
> +  if (TREE_CODE (fn) == FUNCTION_DECL
> +      && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
> +      && DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_START
> +      /* Check we're referring to Intel's TM specifications.  */
> +      && !strcmp (IDENTIFIER_POINTER (DECL_NAME (fn)),
> +                 "__builtin__ITM_beginTransaction")

Huh.  Are there others that would use the same builtin?

> +      && gimple_call_num_args (gs) > 0
> +      )

) goes to the previous line.

> +    {
> +      tree t = gimple_call_arg (gs, 0);
> +      unsigned HOST_WIDE_INT props;
> +      gcc_assert (TREE_CODE (t) == INTEGER_CST);
> +
> +      pp_string (buffer, " [ ");
> +
> +      /* Get the transaction code properties.  */
> +      props = TREE_INT_CST_LOW (t);
> +
> +      if (props & PR_INSTRUMENTEDCODE)
> +       pp_string (buffer, "instrumentedCode ");
> +      if (props & PR_UNINSTRUMENTEDCODE)
> +       pp_string (buffer, "uninstrumentedCode ");
> +      if (props & PR_HASNOXMMUPDATE)
> +       pp_string (buffer, "hasNoXMMUpdate ");
> +      if (props & PR_HASNOABORT)
> +       pp_string (buffer, "hasNoAbort ");
> +      if (props & PR_HASNOIRREVOCABLE)
> +       pp_string (buffer, "hasNoIrrevocable ");
> +      if (props & PR_DOESGOIRREVOCABLE)
> +       pp_string (buffer, "doesGoIrrevocable ");
> +      if (props & PR_HASNOSIMPLEREADS)
> +       pp_string (buffer, "hasNoSimpleReads ");
> +      if (props & PR_AWBARRIERSOMITTED)
> +       pp_string (buffer, "awBarriersOmitted ");
> +      if (props & PR_RARBARRIERSOMITTED)
> +       pp_string (buffer, "RaRBarriersOmitted ");
> +      if (props & PR_UNDOLOGCODE)
> +       pp_string (buffer, "undoLogCode ");
> +      if (props & PR_PREFERUNINSTRUMENTED)
> +       pp_string (buffer, "preferUninstrumented ");
> +      if (props & PR_EXCEPTIONBLOCK)
> +       pp_string (buffer, "exceptionBlock ");
> +      if (props & PR_HASELSE)
> +       pp_string (buffer, "hasElse ");
> +      if (props & PR_READONLY)
> +       pp_string (buffer, "readOnly ");
> +
> +      pp_string (buffer, "]");
> +    }
>  }
>
>
> @@ -947,6 +1007,24 @@ dump_gimple_eh_must_not_throw (pretty_pr
>  }
>
>
> +/* Dump a GIMPLE_EH_ELSE tuple on the pretty_printer BUFFER, SPC spaces of
> +   indent.  FLAGS specifies details to show in the dump (see TDF_* in
> +   tree-pass.h).  */
> +
> +static void
> +dump_gimple_eh_else (pretty_printer *buffer, gimple gs, int spc, int flags)
> +{
> +  if (flags & TDF_RAW)
> +    dump_gimple_fmt (buffer, spc, flags,
> +                     "%G <%+N_BODY <%S>%nE_BODY <%S>%->", gs,
> +                     gimple_eh_else_n_body (gs), gimple_eh_else_e_body
> (gs));
> +  else
> +    dump_gimple_fmt (buffer, spc, flags,
> +                    "<<<if_normal_exit>>>%+{%S}%-<<<else_eh_exit>>>%+{%S}",
> +                     gimple_eh_else_n_body (gs), gimple_eh_else_e_body
> (gs));
> +}
> +
> +
>  /* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of
>    indent.  FLAGS specifies details to show in the dump (see TDF_* in
>    tree-pass.h).  */
> @@ -1269,6 +1347,86 @@ dump_gimple_omp_return (pretty_printer *
>     }
>  }
>
> +/* Dump a GIMPLE_TRANSACTION tuple on the pretty_printer BUFFER.  */
> +
> +static void
> +dump_gimple_transaction (pretty_printer *buffer, gimple gs, int spc, int
> flags)
> +{
> +  unsigned subcode = gimple_transaction_subcode (gs);
> +
> +  if (flags & TDF_RAW)
> +    {
> +      dump_gimple_fmt (buffer, spc, flags,
> +                      "%G [SUBCODE=%x,LABEL=%T] <%+BODY <%S> >",
> +                      gs, subcode, gimple_transaction_label (gs),
> +                      gimple_transaction_body (gs));
> +    }
> +  else
> +    {
> +      if (subcode & GTMA_IS_OUTER)
> +       pp_string (buffer, "__transaction_atomic [[outer]]");
> +      else if (subcode & GTMA_IS_RELAXED)
> +       pp_string (buffer, "__transaction_relaxed");
> +      else
> +       pp_string (buffer, "__transaction_atomic");
> +      subcode &= ~GTMA_DECLARATION_MASK;
> +
> +      if (subcode || gimple_transaction_label (gs))
> +       {
> +         pp_string (buffer, "  //");
> +         if (gimple_transaction_label (gs))
> +           {
> +             pp_string (buffer, " LABEL=");
> +             dump_generic_node (buffer, gimple_transaction_label (gs),
> +                                spc, flags, false);
> +           }
> +         if (subcode)
> +           {
> +             pp_string (buffer, " SUBCODE=[ ");
> +             if (subcode & GTMA_HAVE_ABORT)
> +               {
> +                 pp_string (buffer, "GTMA_HAVE_ABORT ");
> +                 subcode &= ~GTMA_HAVE_ABORT;
> +               }
> +             if (subcode & GTMA_HAVE_LOAD)
> +               {
> +                 pp_string (buffer, "GTMA_HAVE_LOAD ");
> +                 subcode &= ~GTMA_HAVE_LOAD;
> +               }
> +             if (subcode & GTMA_HAVE_STORE)
> +               {
> +                 pp_string (buffer, "GTMA_HAVE_STORE ");
> +                 subcode &= ~GTMA_HAVE_STORE;
> +               }
> +             if (subcode & GTMA_MAY_ENTER_IRREVOCABLE)
> +               {
> +                 pp_string (buffer, "GTMA_MAY_ENTER_IRREVOCABLE ");
> +                 subcode &= ~GTMA_MAY_ENTER_IRREVOCABLE;
> +               }
> +             if (subcode & GTMA_DOES_GO_IRREVOCABLE)
> +               {
> +                 pp_string (buffer, "GTMA_DOES_GO_IRREVOCABLE ");
> +                 subcode &= ~GTMA_DOES_GO_IRREVOCABLE;
> +               }
> +             if (subcode)
> +               pp_printf (buffer, "0x%x ", subcode);
> +             pp_string (buffer, "]");
> +           }
> +       }
> +
> +      if (!gimple_seq_empty_p (gimple_transaction_body (gs)))
> +       {
> +         newline_and_indent (buffer, spc + 2);
> +         pp_character (buffer, '{');
> +         pp_newline (buffer);
> +         dump_gimple_seq (buffer, gimple_transaction_body (gs),
> +                          spc + 4, flags);
> +         newline_and_indent (buffer, spc + 2);
> +         pp_character (buffer, '}');
> +       }
> +    }
> +}
> +
>  /* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of
>    indent.  FLAGS specifies details to show in the dump (see TDF_* in
>    tree-pass.h).  */
> @@ -1855,6 +2013,10 @@ dump_gimple_stmt (pretty_printer *buffer
>       dump_gimple_eh_must_not_throw (buffer, gs, spc, flags);
>       break;
>
> +    case GIMPLE_EH_ELSE:
> +      dump_gimple_eh_else (buffer, gs, spc, flags);
> +      break;
> +
>     case GIMPLE_RESX:
>       dump_gimple_resx (buffer, gs, spc, flags);
>       break;
> @@ -1877,6 +2039,10 @@ dump_gimple_stmt (pretty_printer *buffer
>       pp_string (buffer, " predictor.");
>       break;
>
> +    case GIMPLE_TRANSACTION:
> +      dump_gimple_transaction (buffer, gs, spc, flags);
> +      break;
> +
>     default:
>       GIMPLE_NIY;
>     }
> Index: gcc/gimplify.c
> ===================================================================
> --- gcc/gimplify.c      (.../trunk)     (revision 180744)
> +++ gcc/gimplify.c      (.../branches/transactional-memory)     (revision
> 180773)
> @@ -413,6 +413,8 @@ create_tmp_var_name (const char *prefix)
>       char *preftmp = ASTRDUP (prefix);
>
>       remove_suffix (preftmp, strlen (preftmp));
> +      clean_symbol_name (preftmp);
> +
>       prefix = preftmp;
>     }
>
> @@ -1072,6 +1074,12 @@ voidify_wrapper_expr (tree wrapper, tree
>                }
>              break;
>
> +           case TRANSACTION_EXPR:
> +             TREE_SIDE_EFFECTS (*p) = 1;
> +             TREE_TYPE (*p) = void_type_node;
> +             p = &TRANSACTION_EXPR_BODY (*p);
> +             break;
> +
>            default:
>              goto out;
>            }
> @@ -6527,6 +6535,53 @@ gimplify_omp_atomic (tree *expr_p, gimpl
>    return GS_ALL_DONE;
>  }
>
> +/* Gimplify a TRANSACTION_EXPR.  This involves gimplification of the
> +   body, and adding some EH bits.  */
> +
> +static enum gimplify_status
> +gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
> +{
> +  tree expr = *expr_p, temp, tbody = TRANSACTION_EXPR_BODY (expr);
> +  gimple g;
> +  gimple_seq body = NULL;
> +  struct gimplify_ctx gctx;
> +  int subcode = 0;
> +
> +  /* Wrap the transaction body in a BIND_EXPR so we have a context
> +     where to put decls for OpenMP.  */
> +  if (TREE_CODE (tbody) != BIND_EXPR)
> +    {
> +      tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
> +      TREE_SIDE_EFFECTS (bind) = 1;
> +      SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
> +      TRANSACTION_EXPR_BODY (expr) = bind;
> +    }
> +
> +  push_gimplify_context (&gctx);
> +  temp = voidify_wrapper_expr (*expr_p, NULL);
> +
> +  g = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
> +  pop_gimplify_context (g);
> +
> +  g = gimple_build_transaction (body, NULL);
> +  if (TRANSACTION_EXPR_OUTER (expr))
> +    subcode = GTMA_IS_OUTER;
> +  else if (TRANSACTION_EXPR_RELAXED (expr))
> +    subcode = GTMA_IS_RELAXED;
> +  gimple_transaction_set_subcode (g, subcode);
> +
> +  gimplify_seq_add_stmt (pre_p, g);
> +
> +  if (temp)
> +    {
> +      *expr_p = temp;
> +      return GS_OK;
> +    }
> +
> +  *expr_p = NULL_TREE;
> +  return GS_ALL_DONE;
> +}
> +
>  /* Convert the GENERIC expression tree *EXPR_P to GIMPLE.  If the
>    expression produces a value to be used as an operand inside a GIMPLE
>    statement, the value will be stored back in *EXPR_P.  This value will
> @@ -7251,6 +7306,10 @@ gimplify_expr (tree *expr_p, gimple_seq
>          ret = gimplify_omp_atomic (expr_p, pre_p);
>          break;
>
> +        case TRANSACTION_EXPR:
> +          ret = gimplify_transaction (expr_p, pre_p);
> +          break;
> +
>        case TRUTH_AND_EXPR:
>        case TRUTH_OR_EXPR:
>        case TRUTH_XOR_EXPR:
> Index: gcc/calls.c
> ===================================================================
> --- gcc/calls.c (.../trunk)     (revision 180744)
> +++ gcc/calls.c (.../branches/transactional-memory)     (revision 180773)
> @@ -496,7 +496,60 @@ emit_call_1 (rtx funexp, tree fntree ATT
>  static int
>  special_function_p (const_tree fndecl, int flags)
>  {
> -  if (fndecl && DECL_NAME (fndecl)
> +  if (fndecl == NULL)
> +    return flags;
> +
> +  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
> +    {
> +      switch (DECL_FUNCTION_CODE (fndecl))
> +       {
> +       case BUILT_IN_TM_COMMIT:
> +       case BUILT_IN_TM_COMMIT_EH:
> +       case BUILT_IN_TM_ABORT:
> +       case BUILT_IN_TM_IRREVOCABLE:
> +       case BUILT_IN_TM_GETTMCLONE_IRR:
> +       case BUILT_IN_TM_MEMCPY:
> +       case BUILT_IN_TM_MEMMOVE:
> +        case BUILT_IN_TM_MEMSET:
> +       CASE_BUILT_IN_TM_STORE (1):
> +       CASE_BUILT_IN_TM_STORE (2):
> +       CASE_BUILT_IN_TM_STORE (4):
> +       CASE_BUILT_IN_TM_STORE (8):
> +       CASE_BUILT_IN_TM_STORE (FLOAT):
> +       CASE_BUILT_IN_TM_STORE (DOUBLE):
> +       CASE_BUILT_IN_TM_STORE (LDOUBLE):
> +       CASE_BUILT_IN_TM_STORE (M64):
> +       CASE_BUILT_IN_TM_STORE (M128):
> +       CASE_BUILT_IN_TM_STORE (M256):
> +       CASE_BUILT_IN_TM_LOAD (1):
> +       CASE_BUILT_IN_TM_LOAD (2):
> +       CASE_BUILT_IN_TM_LOAD (4):
> +       CASE_BUILT_IN_TM_LOAD (8):
> +       CASE_BUILT_IN_TM_LOAD (FLOAT):
> +       CASE_BUILT_IN_TM_LOAD (DOUBLE):
> +       CASE_BUILT_IN_TM_LOAD (LDOUBLE):
> +       CASE_BUILT_IN_TM_LOAD (M64):
> +       CASE_BUILT_IN_TM_LOAD (M128):
> +       CASE_BUILT_IN_TM_LOAD (M256):
> +       case BUILT_IN_TM_LOG:
> +       case BUILT_IN_TM_LOG_1:
> +       case BUILT_IN_TM_LOG_2:
> +       case BUILT_IN_TM_LOG_4:
> +       case BUILT_IN_TM_LOG_8:
> +       case BUILT_IN_TM_LOG_FLOAT:
> +       case BUILT_IN_TM_LOG_DOUBLE:
> +       case BUILT_IN_TM_LOG_LDOUBLE:
> +       case BUILT_IN_TM_LOG_M64:
> +       case BUILT_IN_TM_LOG_M128:
> +       case BUILT_IN_TM_LOG_M256:
> +         flags |= ECF_TM_OPS;
> +         break;
> +       default:
> +         break;
> +       }
> +    }

This should not be in special_function_p which is solely to check
for the identifiers.  Instead the caller of this function should handle
the builtin codes (flags_from_decl_or_type).

> +  if (DECL_NAME (fndecl)
>       && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 17
>       /* Exclude functions not at the file scope, or not `extern',
>         since they are not the magic functions we would otherwise
> @@ -644,6 +697,9 @@ flags_from_decl_or_type (const_tree exp)
>       if (TREE_NOTHROW (exp))
>        flags |= ECF_NOTHROW;
>
> +      if (DECL_IS_TM_CLONE (exp))
> +       flags |= ECF_TM_OPS;
> +

Thus, here.
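
Something along these lines, say (tm_builtin_call_flags is an illustrative
helper name, not from the patch):

  /* Return any ECF_* flags implied by the TM builtin FNDECL.  */
  static int
  tm_builtin_call_flags (const_tree fndecl)
  {
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_ABORT:
      CASE_BUILT_IN_TM_STORE (1):
      CASE_BUILT_IN_TM_LOAD (1):
      /* ... and the rest of the TM load/store/log codes ...  */
        return ECF_TM_OPS;
      default:
        return 0;
      }
  }

with flags_from_decl_or_type doing

    if (DECL_BUILT_IN_CLASS (exp) == BUILT_IN_NORMAL)
      flags |= tm_builtin_call_flags (exp);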

>       flags = special_function_p (exp, flags);
>     }
>   else if (TYPE_P (exp) && TYPE_READONLY (exp))
> Index: gcc/tree-inline.c
> ===================================================================
> --- gcc/tree-inline.c   (.../trunk)     (revision 180744)
> +++ gcc/tree-inline.c   (.../branches/transactional-memory)     (revision
> 180773)
> @@ -1365,6 +1365,12 @@ remap_gimple_stmt (gimple stmt, copy_bod
>            = gimple_build_omp_critical (s1, gimple_omp_critical_name
> (stmt));
>          break;
>
> +       case GIMPLE_TRANSACTION:
> +         s1 = remap_gimple_seq (gimple_transaction_body (stmt), id);
> +         copy = gimple_build_transaction (s1, gimple_transaction_label
> (stmt));
> +         gimple_transaction_set_subcode (copy, gimple_transaction_subcode
> (stmt));
> +         break;
> +
>        default:
>          gcc_unreachable ();
>        }
> @@ -3600,6 +3606,11 @@ estimate_num_insns (gimple stmt, eni_wei
>       return (weights->omp_cost
>               + estimate_num_insns_seq (gimple_omp_body (stmt), weights));
>
> +    case GIMPLE_TRANSACTION:
> +      return (weights->tm_cost
> +             + estimate_num_insns_seq (gimple_transaction_body (stmt),
> +                                       weights));
> +

Huh, so we now have non-lowered gimple sub-sequence throughout all
optimizations (inlining especially)?  :(

I think I'm missing the tree-cfg.c parts that do any verification of the
new gimple kinds.

>     default:
>       gcc_unreachable ();
>     }
> @@ -3639,6 +3650,7 @@ init_inline_once (void)
>   eni_size_weights.target_builtin_call_cost = 1;
>   eni_size_weights.div_mod_cost = 1;
>   eni_size_weights.omp_cost = 40;
> +  eni_size_weights.tm_cost = 10;
>   eni_size_weights.time_based = false;
>   eni_size_weights.return_cost = 1;
>
> @@ -3650,6 +3662,7 @@ init_inline_once (void)
>   eni_time_weights.target_builtin_call_cost = 1;
>   eni_time_weights.div_mod_cost = 10;
>   eni_time_weights.omp_cost = 40;
> +  eni_time_weights.tm_cost = 40;
>   eni_time_weights.time_based = true;
>   eni_time_weights.return_cost = 2;
>  }
> Index: gcc/tree-inline.h
> ===================================================================
> --- gcc/tree-inline.h   (.../trunk)     (revision 180744)
> +++ gcc/tree-inline.h   (.../branches/transactional-memory)     (revision
> 180773)
> @@ -144,6 +144,9 @@ typedef struct eni_weights_d
>   /* Cost for omp construct.  */
>   unsigned omp_cost;
>
> +  /* Cost for tm transaction.  */
> +  unsigned tm_cost;
> +
>   /* Cost of return.  */
>   unsigned return_cost;
>
> Index: gcc/gimple.c
> ===================================================================
> --- gcc/gimple.c        (.../trunk)     (revision 180744)
> +++ gcc/gimple.c        (.../branches/transactional-memory)     (revision
> 180773)
> @@ -743,6 +743,17 @@ gimple_build_eh_must_not_throw (tree dec
>   return p;
>  }
>
> +/* Build a GIMPLE_EH_ELSE statement.  */
> +
> +gimple
> +gimple_build_eh_else (gimple_seq n_body, gimple_seq e_body)
> +{
> +  gimple p = gimple_alloc (GIMPLE_EH_ELSE, 0);
> +  gimple_eh_else_set_n_body (p, n_body);
> +  gimple_eh_else_set_e_body (p, e_body);
> +  return p;
> +}
> +
>  /* Build a GIMPLE_TRY statement.
>
>    EVAL is the expression to evaluate.
> @@ -1146,6 +1157,17 @@ gimple_build_omp_atomic_store (tree val)
>   return p;
>  }
>
> +/* Build a GIMPLE_TRANSACTION statement.  */
> +
> +gimple
> +gimple_build_transaction (gimple_seq body, tree label)
> +{
> +  gimple p = gimple_alloc (GIMPLE_TRANSACTION, 0);
> +  gimple_transaction_set_body (p, body);
> +  gimple_transaction_set_label (p, label);
> +  return p;
> +}
> +
>  /* Build a GIMPLE_PREDICT statement.  PREDICT is one of the predictors from
>    predict.def, OUTCOME is NOT_TAKEN or TAKEN.  */
>
> @@ -1331,7 +1353,7 @@ walk_gimple_seq (gimple_seq seq, walk_st

As you are changing the behavior of this walker, you should update its
documentation.
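
For example, the new removed_stmt protocol amounts to roughly this (sketch
only; the callback name is made up):

  /* walk_stmt_fn callback that deletes the statement it is handed.
     The walker must not advance past the freed iterator, so the
     removal is reported through wi->removed_stmt.  */
  static tree
  remove_stmt_callback (gimple_stmt_iterator *gsi, bool *handled_ops_p,
			struct walk_stmt_info *wi)
  {
    gsi_remove (gsi, true);
    wi->removed_stmt = true;
    *handled_ops_p = true;
    return NULL_TREE;
  }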

>  {
>   gimple_stmt_iterator gsi;
>
> -  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
> +  for (gsi = gsi_start (seq); !gsi_end_p (gsi); )
>     {
>       tree ret = walk_gimple_stmt (&gsi, callback_stmt, callback_op, wi);
>       if (ret)
> @@ -1340,8 +1362,12 @@ walk_gimple_seq (gimple_seq seq, walk_st
>             to hold it.  */
>          gcc_assert (wi);
>          wi->callback_result = ret;
> -         return gsi_stmt (gsi);
> +
> +         return wi->removed_stmt ? NULL : gsi_stmt (gsi);
>        }
> +
> +      if (!wi->removed_stmt)
> +       gsi_next (&gsi);
>     }
>
>   if (wi)
> @@ -1680,6 +1706,13 @@ walk_gimple_op (gimple stmt, walk_tree_f
>        return ret;
>       break;
>
> +    case GIMPLE_TRANSACTION:
> +      ret = walk_tree (gimple_transaction_label_ptr (stmt), callback_op,
> +                      wi, pset);
> +      if (ret)
> +       return ret;
> +      break;
> +
>       /* Tuples that do not have operands.  */
>     case GIMPLE_NOP:
>     case GIMPLE_RESX:
> @@ -1730,10 +1763,13 @@ walk_gimple_stmt (gimple_stmt_iterator *
>   gimple stmt = gsi_stmt (*gsi);
>
>   if (wi)
> -    wi->gsi = *gsi;
> +    {
> +      wi->gsi = *gsi;
> +      wi->removed_stmt = false;
>
> -  if (wi && wi->want_locations && gimple_has_location (stmt))
> -    input_location = gimple_location (stmt);
> +      if (wi->want_locations && gimple_has_location (stmt))
> +       input_location = gimple_location (stmt);
> +    }
>
>   ret = NULL;
>
> @@ -1751,6 +1787,8 @@ walk_gimple_stmt (gimple_stmt_iterator *
>       gcc_assert (tree_ret == NULL);
>
>       /* Re-read stmt in case the callback changed it.  */
> +      if (wi && wi->removed_stmt)
> +       return NULL;
>       stmt = gsi_stmt (*gsi);
>     }
>
> @@ -1786,6 +1824,17 @@ walk_gimple_stmt (gimple_stmt_iterator *
>        return wi->callback_result;
>       break;
>
> +    case GIMPLE_EH_ELSE:
> +      ret = walk_gimple_seq (gimple_eh_else_n_body (stmt),
> +                            callback_stmt, callback_op, wi);
> +      if (ret)
> +       return wi->callback_result;
> +      ret = walk_gimple_seq (gimple_eh_else_e_body (stmt),
> +                            callback_stmt, callback_op, wi);
> +      if (ret)
> +       return wi->callback_result;
> +      break;
> +
>     case GIMPLE_TRY:
>       ret = walk_gimple_seq (gimple_try_eval (stmt), callback_stmt,
> callback_op,
>                             wi);
> @@ -1813,8 +1862,8 @@ walk_gimple_stmt (gimple_stmt_iterator *
>     case GIMPLE_OMP_TASK:
>     case GIMPLE_OMP_SECTIONS:
>     case GIMPLE_OMP_SINGLE:
> -      ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt,
> callback_op,
> -                            wi);
> +      ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt,
> +                            callback_op, wi);
>       if (ret)
>        return wi->callback_result;
>       break;
> @@ -1826,6 +1875,13 @@ walk_gimple_stmt (gimple_stmt_iterator *
>        return wi->callback_result;
>       break;
>
> +    case GIMPLE_TRANSACTION:
> +      ret = walk_gimple_seq (gimple_transaction_body (stmt),
> +                            callback_stmt, callback_op, wi);
> +      if (ret)
> +       return wi->callback_result;
> +      break;
> +
>     default:
>       gcc_assert (!gimple_has_substatements (stmt));
>       break;
> @@ -2252,6 +2308,13 @@ gimple_copy (gimple stmt)
>          gimple_eh_filter_set_types (copy, t);
>          break;
>
> +       case GIMPLE_EH_ELSE:
> +         new_seq = gimple_seq_copy (gimple_eh_else_n_body (stmt));
> +         gimple_eh_else_set_n_body (copy, new_seq);
> +         new_seq = gimple_seq_copy (gimple_eh_else_e_body (stmt));
> +         gimple_eh_else_set_e_body (copy, new_seq);
> +         break;
> +
>        case GIMPLE_TRY:
>          new_seq = gimple_seq_copy (gimple_try_eval (stmt));
>          gimple_try_set_eval (copy, new_seq);
> @@ -2327,6 +2390,11 @@ gimple_copy (gimple stmt)
>          gimple_omp_set_body (copy, new_seq);
>          break;
>
> +        case GIMPLE_TRANSACTION:
> +         new_seq = gimple_seq_copy (gimple_transaction_body (stmt));
> +         gimple_transaction_set_body (copy, new_seq);
> +         break;
> +
>        case GIMPLE_WITH_CLEANUP_EXPR:
>          new_seq = gimple_seq_copy (gimple_wce_cleanup (stmt));
>          gimple_wce_set_cleanup (copy, new_seq);
> @@ -2785,7 +2853,7 @@ is_gimple_address (const_tree t)
>  /* Strip out all handled components that produce invariant
>    offsets.  */
>
> -static const_tree
> +const_tree
>  strip_invariant_refs (const_tree op)
>  {
>   while (handled_component_p (op))

If you export this please move it to tree.c.

> @@ -3085,6 +3153,8 @@ get_call_expr_in (tree t)
>     t = TREE_OPERAND (t, 1);
>   if (TREE_CODE (t) == WITH_SIZE_EXPR)
>     t = TREE_OPERAND (t, 0);
> +  if (TREE_CODE (t) == VIEW_CONVERT_EXPR)
> +    t = TREE_OPERAND (t, 0);
>   if (TREE_CODE (t) == CALL_EXPR)
>     return t;
>   return NULL_TREE;

This is an unused function.  Please move it to where you need it instead,
make it static, and adjust it to do exactly what you want.
After the above change it looks strange: it handles VIEW_CONVERT_EXPR but
not other component refs.

> Index: gcc/gimple.h
> ===================================================================
> --- gcc/gimple.h        (.../trunk)     (revision 180744)
> +++ gcc/gimple.h        (.../branches/transactional-memory)     (revision
> 180773)
> @@ -105,6 +105,7 @@ enum gf_mask {
>     GF_CALL_NOTHROW            = 1 << 5,
>     GF_CALL_ALLOCA_FOR_VAR     = 1 << 6,
>     GF_CALL_INTERNAL           = 1 << 7,
> +    GF_CALL_NOINLINE           = 1 << 8,
>     GF_OMP_PARALLEL_COMBINED   = 1 << 0,

?  Why not use GF_CALL_CANNOT_INLINE?

>     /* True on an GIMPLE_OMP_RETURN statement if the return does not require
> @@ -487,6 +488,15 @@ struct GTY(()) gimple_statement_eh_filte
>   gimple_seq failure;
>  };
>
> +/* GIMPLE_EH_ELSE */
> +
> +struct GTY(()) gimple_statement_eh_else {
> +  /* [ WORD 1-4 ]  */
> +  struct gimple_statement_base gsbase;
> +
> +  /* [ WORD 5,6 ] */
> +  gimple_seq n_body, e_body;
> +};
>
>  /* GIMPLE_EH_MUST_NOT_THROW */
>
> @@ -757,6 +767,43 @@ struct GTY(()) gimple_statement_omp_atom
>   tree val;
>  };
>
> +/* GIMPLE_TRANSACTION.  */
> +
> +/* Bits to be stored in the GIMPLE_TRANSACTION subcode.  */
> +
> +/* The __transaction_atomic was declared [[outer]] or it is
> +   __transaction_relaxed.  */
> +#define GTMA_IS_OUTER                  (1u << 0)
> +#define GTMA_IS_RELAXED                        (1u << 1)
> +#define GTMA_DECLARATION_MASK          (GTMA_IS_OUTER | GTMA_IS_RELAXED)
> +
> +/* The transaction is seen to not have an abort.  */
> +#define GTMA_HAVE_ABORT                        (1u << 2)
> +/* The transaction is seen to have loads or stores.  */
> +#define GTMA_HAVE_LOAD                 (1u << 3)
> +#define GTMA_HAVE_STORE                        (1u << 4)
> +/* The transaction MAY enter serial irrevocable mode in its dynamic scope.
>  */
> +#define GTMA_MAY_ENTER_IRREVOCABLE     (1u << 5)
> +/* The transaction WILL enter serial irrevocable mode.
> +   An irrevocable block post-dominates the entire transaction, such
> +   that all invocations of the transaction will go serial-irrevocable.
> +   In such case, we don't bother instrumenting the transaction, and
> +   tell the runtime that it should begin the transaction in
> +   serial-irrevocable mode.  */
> +#define GTMA_DOES_GO_IRREVOCABLE       (1u << 6)
> +
> +struct GTY(()) gimple_statement_transaction
> +{
> +  /* [ WORD 1-10 ]  */
> +  struct gimple_statement_with_memory_ops_base gsbase;
> +
> +  /* [ WORD 11 ] */
> +  gimple_seq body;
> +
> +  /* [ WORD 12 ] */
> +  tree label;
> +};
> +
>  #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP)  SYM,
>  enum gimple_statement_structure_enum {
>  #include "gsstruct.def"
> @@ -779,6 +826,7 @@ union GTY ((desc ("gimple_statement_stru
>   struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
>   struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER")))
> gimple_eh_filter;
>   struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
> +  struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE")))
> gimple_eh_else;
>   struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
>   struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL")))
> gimple_eh_ctrl;
>   struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
> @@ -793,6 +841,7 @@ union GTY ((desc ("gimple_statement_stru
>   struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE")))
> gimple_omp_continue;
>   struct gimple_statement_omp_atomic_load GTY ((tag
> ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
>   struct gimple_statement_omp_atomic_store GTY ((tag
> ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
> +  struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION")))
> gimple_transaction;
>  };
>
>  /* In gimple.c.  */
> @@ -846,6 +895,7 @@ gimple gimple_build_asm_vec (const char
>  gimple gimple_build_catch (tree, gimple_seq);
>  gimple gimple_build_eh_filter (tree, gimple_seq);
>  gimple gimple_build_eh_must_not_throw (tree);
> +gimple gimple_build_eh_else (gimple_seq, gimple_seq);
>  gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
>  gimple gimple_build_wce (gimple_seq);
>  gimple gimple_build_resx (int);
> @@ -868,6 +918,7 @@ gimple gimple_build_omp_single (gimple_s
>  gimple gimple_build_cdt (tree, tree);
>  gimple gimple_build_omp_atomic_load (tree, tree);
>  gimple gimple_build_omp_atomic_store (tree);
> +gimple gimple_build_transaction (gimple_seq, tree);
>  gimple gimple_build_predict (enum br_predictor, enum prediction);
>  enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
>  void sort_case_labels (VEC(tree,heap) *);
> @@ -986,6 +1037,7 @@ extern bool walk_stmt_load_store_ops (gi
>                                      bool (*)(gimple, tree, void *),
>                                      bool (*)(gimple, tree, void *));
>  extern bool gimple_ior_addresses_taken (bitmap, gimple);
> +extern const_tree strip_invariant_refs (const_tree);
>  extern bool gimple_call_builtin_p (gimple, enum built_in_function);
>  extern bool gimple_asm_clobbers_memory_p (const_gimple);
>
> @@ -1077,6 +1129,9 @@ extern tree canonicalize_cond_expr_cond
>  /* In omp-low.c.  */
>  extern tree omp_reduction_init (tree, tree);
>
> +/* In trans-mem.c.  */
> +extern void diagnose_tm_safe_errors (tree);
> +
>  /* In tree-nested.c.  */
>  extern void lower_nested_functions (tree);
>  extern void insert_field_into_struct (tree, tree);
> @@ -1135,6 +1190,7 @@ gimple_has_substatements (gimple g)
>     case GIMPLE_BIND:
>     case GIMPLE_CATCH:
>     case GIMPLE_EH_FILTER:
> +    case GIMPLE_EH_ELSE:
>     case GIMPLE_TRY:
>     case GIMPLE_OMP_FOR:
>     case GIMPLE_OMP_MASTER:
> @@ -1146,6 +1202,7 @@ gimple_has_substatements (gimple g)
>     case GIMPLE_OMP_SINGLE:
>     case GIMPLE_OMP_CRITICAL:
>     case GIMPLE_WITH_CLEANUP_EXPR:
> +    case GIMPLE_TRANSACTION:
>       return true;
>
>     default:
> @@ -2436,6 +2493,22 @@ gimple_call_alloca_for_var_p (gimple s)
>   return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
>  }
>
> +/* Return true if S is a noinline call.  */
> +
> +static inline bool
> +gimple_call_noinline_p (gimple s)
> +{
> +  GIMPLE_CHECK (s, GIMPLE_CALL);
> +  return (s->gsbase.subcode & GF_CALL_NOINLINE) != 0;
> +}
> +
> +static inline void
> +gimple_call_set_noinline_p (gimple s)
> +{
> +  GIMPLE_CHECK (s, GIMPLE_CALL);
> +  s->gsbase.subcode |= GF_CALL_NOINLINE;
> +}

See above.  We have *_cannot_inline already.
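
I.e. presumably just (assuming the existing accessor's current signature):

  gimple_call_set_cannot_inline (call_stmt, true);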

>  /* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */
>
>  static inline void
> @@ -3178,6 +3251,35 @@ gimple_eh_must_not_throw_set_fndecl (gim
>   gs->gimple_eh_mnt.fndecl = decl;
>  }
>
> +/* GIMPLE_EH_ELSE accessors.  */
> +
> +static inline gimple_seq
> +gimple_eh_else_n_body (gimple gs)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
> +  return gs->gimple_eh_else.n_body;
> +}
> +
> +static inline gimple_seq
> +gimple_eh_else_e_body (gimple gs)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
> +  return gs->gimple_eh_else.e_body;
> +}
> +
> +static inline void
> +gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
> +  gs->gimple_eh_else.n_body = seq;
> +}
> +
> +static inline void
> +gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
> +  gs->gimple_eh_else.e_body = seq;
> +}
>
>  /* GIMPLE_TRY accessors. */
>
> @@ -4556,6 +4658,67 @@ gimple_omp_continue_set_control_use (gim
>   g->gimple_omp_continue.control_use = use;
>  }
>
> +/* Return the body for the GIMPLE_TRANSACTION statement GS.  */
> +
> +static inline gimple_seq
> +gimple_transaction_body (gimple gs)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  return gs->gimple_transaction.body;
> +}
> +
> +/* Return the label associated with a GIMPLE_TRANSACTION.  */
> +
> +static inline tree
> +gimple_transaction_label (const_gimple gs)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  return gs->gimple_transaction.label;
> +}
> +
> +static inline tree *
> +gimple_transaction_label_ptr (gimple gs)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  return &gs->gimple_transaction.label;
> +}
> +
> +/* Return the subcode associated with a GIMPLE_TRANSACTION.  */
> +
> +static inline unsigned int
> +gimple_transaction_subcode (const_gimple gs)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  return gs->gsbase.subcode;
> +}
> +
> +/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */
> +
> +static inline void
> +gimple_transaction_set_body (gimple gs, gimple_seq body)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  gs->gimple_transaction.body = body;
> +}
> +
> +/* Set the label associated with a GIMPLE_TRANSACTION.  */
> +
> +static inline void
> +gimple_transaction_set_label (gimple gs, tree label)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  gs->gimple_transaction.label = label;
> +}
> +
> +/* Set the subcode associated with a GIMPLE_TRANSACTION.  */
> +
> +static inline void
> +gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
> +{
> +  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
> +  gs->gsbase.subcode = subcode;
> +}
> +
>
>  /* Return a pointer to the return value for GIMPLE_RETURN GS.  */
>
> @@ -4982,6 +5145,12 @@ struct walk_stmt_info
>      will be visited more than once.  */
>   struct pointer_set_t *pset;
>
> +  /* Operand returned by the callbacks.  This is set when calling
> +     walk_gimple_seq.  If the walk_stmt_fn or walk_tree_fn callback
> +     returns non-NULL, this field will contain the tree returned by
> +     the last callback.  */
> +  tree callback_result;
> +
>   /* Indicates whether the operand being examined may be replaced
>      with something that matches is_gimple_val (if true) or something
>      slightly more complicated (if false).  "Something" technically
> @@ -4994,23 +5163,20 @@ struct walk_stmt_info
>      statement 'foo (&var)', the flag VAL_ONLY will initially be set
>      to true, however, when walking &var, the operand of that
>      ADDR_EXPR does not need to be a GIMPLE value.  */
> -  bool val_only;
> +  BOOL_BITFIELD val_only : 1;
>
>   /* True if we are currently walking the LHS of an assignment.  */
> -  bool is_lhs;
> +  BOOL_BITFIELD is_lhs : 1;
>
>   /* Optional.  Set to true by the callback functions if they made any
>      changes.  */
> -  bool changed;
> +  BOOL_BITFIELD changed : 1;
>
>   /* True if we're interested in location information.  */
> -  bool want_locations;
> +  BOOL_BITFIELD want_locations : 1;
>
> -  /* Operand returned by the callbacks.  This is set when calling
> -     walk_gimple_seq.  If the walk_stmt_fn or walk_tree_fn callback
> -     returns non-NULL, this field will contain the tree returned by
> -     the last callback.  */
> -  tree callback_result;
> +  /* True if we've removed the statement that was processed.  */
> +  BOOL_BITFIELD removed_stmt : 1;
>  };
>
>  /* Callback for walk_gimple_stmt.  Called for every statement found
>

Otherwise looks ok to me.

Richard.
Michael Matz Nov. 4, 2011, 3:26 p.m. UTC | #2
Hi,

On Thu, 3 Nov 2011, Aldy Hernandez wrote:

> +/* GIMPLE_EH_ELSE <N_BODY, E_BODY> must be the sole contents of
> +   a GIMPLE_TRY_FINALLY node.  For all normal exits from the try block,
> +   we N_BODY is run; for all exception exits from the try block,

s/we //

> +++ gcc/calls.c	(.../branches/transactional-memory)	(revision 180773)
> @@ -496,7 +496,60 @@ emit_call_1 (rtx funexp, tree fntree ATT
>  static int
>  special_function_p (const_tree fndecl, int flags)
>  {
> +	case BUILT_IN_TM_IRREVOCABLE:
> +	case BUILT_IN_TM_GETTMCLONE_IRR:
> +	case BUILT_IN_TM_MEMCPY:
> +	case BUILT_IN_TM_MEMMOVE:
> +        case BUILT_IN_TM_MEMSET:

Whitespace.

> @@ -1751,6 +1787,8 @@ walk_gimple_stmt (gimple_stmt_iterator *
>        gcc_assert (tree_ret == NULL);
> 
>        /* Re-read stmt in case the callback changed it.  */
> +      if (wi && wi->removed_stmt)
> +	return NULL;
>        stmt = gsi_stmt (*gsi);

The comment belongs to the stmt assignment, not to the new if/return.
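
I.e. presumably:

      if (wi && wi->removed_stmt)
	return NULL;

      /* Re-read stmt in case the callback changed it.  */
      stmt = gsi_stmt (*gsi);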

> @@ -3085,6 +3153,8 @@ get_call_expr_in (tree t)
>      t = TREE_OPERAND (t, 1);
>    if (TREE_CODE (t) == WITH_SIZE_EXPR)
>      t = TREE_OPERAND (t, 0);
> +  if (TREE_CODE (t) == VIEW_CONVERT_EXPR)
> +    t = TREE_OPERAND (t, 0);
>    if (TREE_CODE (t) == CALL_EXPR)
>      return t;

The function get_call_expr_in is unused in our compiler (and you don't 
introduce a new use), so instead of amending it, just remove it.

> Index: gcc/gimple.h
> ===================================================================
> --- gcc/gimple.h	(.../trunk)	(revision 180744)
> +++ gcc/gimple.h	(.../branches/transactional-memory)	(revision
> 180773)
> @@ -105,6 +105,7 @@ enum gf_mask {
>      GF_CALL_NOTHROW		= 1 << 5,
>      GF_CALL_ALLOCA_FOR_VAR	= 1 << 6,
>      GF_CALL_INTERNAL		= 1 << 7,
> +    GF_CALL_NOINLINE		= 1 << 8,
>      GF_OMP_PARALLEL_COMBINED	= 1 << 0,
...
> +/* Return true if S is a noinline call.  */
> +
> +static inline bool
> +gimple_call_noinline_p (gimple s)
> +{
> +  GIMPLE_CHECK (s, GIMPLE_CALL);
> +  return (s->gsbase.subcode & GF_CALL_NOINLINE) != 0;
> +}
> +
> +static inline void
> +gimple_call_set_noinline_p (gimple s)
> +{
> +  GIMPLE_CHECK (s, GIMPLE_CALL);
> +  s->gsbase.subcode |= GF_CALL_NOINLINE;
> +}

This flag is only used by the new accessors gimple_call_noinline_p and
gimple_call_set_noinline_p.  The latter is used in
trans-mem.c:ipa_tm_insert_gettmclone_call, but is marked as a hack.  The flag
isn't tested anywhere (i.e. there are no calls to gimple_call_noinline_p).
Hence this whole thing is unused; presumably the hack was transformed into a
real solution :)  So, don't add the flag or the accessors, and remove the
call from trans-mem.c.


Ciao,
Michael.
Richard Henderson Nov. 5, 2011, 2:36 a.m. UTC | #3
On 11/04/2011 03:36 AM, Richard Guenther wrote:
>> > +    case GIMPLE_TRANSACTION:
>> > +      return (weights->tm_cost
>> > +             + estimate_num_insns_seq (gimple_transaction_body (stmt),
>> > +                                       weights));
>> > +
> Huh, so we now have non-lowered gimple sub-sequence throughout all
> optimizations (inlining especially)?  :(

No.  I'm not sure why we're still looking at gimple_transaction_body
here -- that should be NULL after lowering.


r~
Richard Henderson Nov. 5, 2011, 2:54 a.m. UTC | #4
On 11/04/2011 07:36 PM, Richard Henderson wrote:
> On 11/04/2011 03:36 AM, Richard Guenther wrote:
>>>> +    case GIMPLE_TRANSACTION:
>>>> +      return (weights->tm_cost
>>>> +             + estimate_num_insns_seq (gimple_transaction_body (stmt),
>>>> +                                       weights));
>>>> +
>> Huh, so we now have non-lowered gimple sub-sequence throughout all
>> optimizations (inlining especially)?  :(
> 
> No.  I'm not sure why we're still looking at gimple_transaction_body
> here -- that should be NULL after lowering.

... of course, I'm not sure why we're looking at all those other
nested statements there inside the inliner either.  At least we're
doing the same thing as everyone else here.


r~
Richard Biener Nov. 5, 2011, 8:38 a.m. UTC | #5
On Sat, Nov 5, 2011 at 3:54 AM, Richard Henderson <rth@redhat.com> wrote:
> On 11/04/2011 07:36 PM, Richard Henderson wrote:
>> On 11/04/2011 03:36 AM, Richard Guenther wrote:
>>>>> +    case GIMPLE_TRANSACTION:
>>>>> +      return (weights->tm_cost
>>>>> +             + estimate_num_insns_seq (gimple_transaction_body (stmt),
>>>>> +                                       weights));
>>>>> +
>>> Huh, so we now have non-lowered gimple sub-sequence throughout all
>>> optimizations (inlining especially)?  :(
>>
>> No.  I'm not sure why we're still looking at gimple_transaction_body
>> here -- that should be NULL after lowering.
> ... of course, I'm not sure why we're looking at all those other
> nested statements there inside the inliner either.  At least we're
> doing the same thing as everyone else here.

It might be because of nested-function lowering, which works on
gimple as it falls out of the gimplifier.  So it might all be correct
after all ...

Sorry for the noise.

Richard.

>
> r~
>
Aldy Hernandez Nov. 5, 2011, 9:05 p.m. UTC | #6
[rth, see below]

>>    local_define_builtin ("__builtin_eh_pointer", ftype, BUILT_IN_EH_POINTER,
>>                         "__builtin_eh_pointer", ECF_PURE | ECF_NOTHROW |
>> ECF_LEAF);
>> +  if (flag_tm)
>> +    apply_tm_attr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
>> +                  get_identifier ("transaction_pure"));
>
> I think this should use a new ECF_TM_PURE flag, unconditionally set
> with handling in the functions that handle/return ECF flags so that
> transitioning this to a tree node flag instead of an attribute is easier.

I could add an ECF_TM_PURE flag and attach it to BUILT_IN_EH_POINTER
in the local_define_builtin above, but we still need the attribute for
function decls, as in:

__attribute__((transaction_pure)) void foo();

Attributes seem like a clean way to approach this.

I don't see what the flag buys us.  Or am I misunderstanding something?

>> +/* Nonzero if this call performs a transactional memory operation.  */
>> +#define ECF_TM_OPS               (1<<  11)
>
> What's this flag useful for?  Isn't it the case that you want to conservatively
> know whether a call might perform a tm operation?  Thus, the flag
> should be inverted?  Is this the same as "TM pure"?

Richard?

>> +    case GIMPLE_TRANSACTION:
>> +      return (weights->tm_cost
>> +             + estimate_num_insns_seq (gimple_transaction_body (stmt),
>> +                                       weights));
>> +
>
> Huh, so we now have non-lowered gimple sub-sequence throughout all
> optimizations (inlining especially)?  :(

Richard addressed this elsewhere.

> I think I'm missing the tree-cfg.c parts that do any verification of the
> new gimple kinds.

Yes, they're there.  I see you commented on them in the middle-end
patch.  I will fix the issues you brought up in that thread.

> ?  Why not use GF_CALL_CANNOT_INLINE?

As per Michael Matz's suggestion, I have removed all reference to this 
unused flag.

>> +static inline void
>> +gimple_call_set_noinline_p (gimple s)
>> +{
>> +  GIMPLE_CHECK (s, GIMPLE_CALL);
>> +  s->gsbase.subcode |= GF_CALL_NOINLINE;
>> +}
>
> See above.  We have *_cannot_inline already.

Similarly here.

Richi, I have fixed or addressed all the issues in this thread, with the
exception of your ECF_TM_PURE and ECF_TM_OPS questions, which I am
deferring to rth and then fixing if required.  I will now go through the
middle-end thread (which was erroneously also prefixed with [patch] 19/n...).

Aldy
Richard Biener Nov. 5, 2011, 10:09 p.m. UTC | #7
On Sat, Nov 5, 2011 at 10:05 PM, Aldy Hernandez <aldyh@redhat.com> wrote:
> [rth, see below]
>
>>>   local_define_builtin ("__builtin_eh_pointer", ftype,
>>> BUILT_IN_EH_POINTER,
>>>                        "__builtin_eh_pointer", ECF_PURE | ECF_NOTHROW |
>>> ECF_LEAF);
>>> +  if (flag_tm)
>>> +    apply_tm_attr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
>>> +                  get_identifier ("transaction_pure"));
>>
>> I think this should use a new ECF_TM_PURE flag, unconditionally set
>> with handling in the functions that handle/return ECF flags so that
>> transitioning this to a tree node flag instead of an attribute is easier.
>
> I could add a ECF_TM_PURE flag and attach it to the BUILT_IN_EH_POINTER in
> the local_define_builtin above, but we still need the attribute for function
> decl's as in:
>
> __attribute__((transaction_pure)) void foo();
>
> Attributes seem like a clean way to approach this.

The middle-end interfacing is supposed to be via ECF_ flags, the user interface
via attributes.  What are the semantics of transaction-pure vs. ...

> I don't see what the flag buys us.  Or am I misunderstanding something?
>
>>> +/* Nonzero if this call performs a transactional memory operation.  */
>>> +#define ECF_TM_OPS               (1<<  11)
>>
>> What's this flag useful for?  Isn't it the case that you want to
>> conservatively
>> know whether a call might perform a tm operation?  Thus, the flag
>> should be inverted?  Is this the same as "TM pure"?

... this?

> Richard?

> Richi, I have fixed or addressed all the issues in this thread, with the
> exception of your ECF_TM_PURE and ECF_TM_OPS questions, which I am deferring
> to rth and then fixing if required.

Yeah, seems to be still an open question.

Thanks,
Richard.

Patch

Index: gcc/tree.c
===================================================================
--- gcc/tree.c	(.../trunk)	(revision 180744)
+++ gcc/tree.c	(.../branches/transactional-memory)	(revision 180773)
@@ -9594,6 +9594,9 @@  build_common_builtin_nodes (void)
  				    integer_type_node, NULL_TREE);
    local_define_builtin ("__builtin_eh_pointer", ftype, 
BUILT_IN_EH_POINTER,
  			"__builtin_eh_pointer", ECF_PURE | ECF_NOTHROW | ECF_LEAF);
+  if (flag_tm)
+    apply_tm_attr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
+		   get_identifier ("transaction_pure"));

    tmp = lang_hooks.types.type_for_mode (targetm.eh_return_filter_mode 
(), 0);
    ftype = build_function_type_list (tmp, integer_type_node, NULL_TREE);
Index: gcc/tree.h
===================================================================
--- gcc/tree.h	(.../trunk)	(revision 180744)
+++ gcc/tree.h	(.../branches/transactional-memory)	(revision 180773)
@@ -539,6 +539,9 @@  struct GTY(()) tree_common {
         ENUM_IS_SCOPED in
  	   ENUMERAL_TYPE

+       TRANSACTION_EXPR_OUTER in
+           TRANSACTION_EXPR
+
     public_flag:

         TREE_OVERFLOW in
@@ -566,6 +569,9 @@  struct GTY(()) tree_common {
         OMP_CLAUSE_PRIVATE_DEBUG in
             OMP_CLAUSE_PRIVATE

+       TRANSACTION_EXPR_RELAXED in
+           TRANSACTION_EXPR
+
     private_flag:

         TREE_PRIVATE in
@@ -1808,6 +1814,14 @@  extern void protected_set_expr_location
  #define CALL_EXPR_ARGP(NODE) \
    (&(TREE_OPERAND (CALL_EXPR_CHECK (NODE), 0)) + 3)

+/* TM directives and accessors.  */
+#define TRANSACTION_EXPR_BODY(NODE) \
+  TREE_OPERAND (TRANSACTION_EXPR_CHECK (NODE), 0)
+#define TRANSACTION_EXPR_OUTER(NODE) \
+  (TRANSACTION_EXPR_CHECK (NODE)->base.static_flag)
+#define TRANSACTION_EXPR_RELAXED(NODE) \
+  (TRANSACTION_EXPR_CHECK (NODE)->base.public_flag)
+
  /* OpenMP directive and clause accessors.  */

  #define OMP_BODY(NODE) \
@@ -3452,6 +3466,34 @@  struct GTY(())
  #define DECL_NO_INLINE_WARNING_P(NODE) \
    (FUNCTION_DECL_CHECK (NODE)->function_decl.no_inline_warning_flag)

+/* Nonzero in a FUNCTION_DECL means this function is the transactional
+   clone of a function - called only from inside transactions.  */
+#define DECL_IS_TM_CLONE(NODE) \
+  (FUNCTION_DECL_CHECK (NODE)->function_decl.tm_clone_flag)
+
+/* Nonzero if a FUNCTION_CODE is a TM load/store.  */
+#define BUILTIN_TM_LOAD_STORE_P(FN) \
+  ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+/* Nonzero if a FUNCTION_CODE is a TM load.  */
+#define BUILTIN_TM_LOAD_P(FN) \
+  ((FN) >= BUILT_IN_TM_LOAD_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+/* Nonzero if a FUNCTION_CODE is a TM store.  */
+#define BUILTIN_TM_STORE_P(FN) \
+  ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_STORE_WAW_LDOUBLE)
+
+#define CASE_BUILT_IN_TM_LOAD(FN)	\
+  case BUILT_IN_TM_LOAD_##FN:		\
+  case BUILT_IN_TM_LOAD_RAR_##FN:	\
+  case BUILT_IN_TM_LOAD_RAW_##FN:	\
+  case BUILT_IN_TM_LOAD_RFW_##FN
+
+#define CASE_BUILT_IN_TM_STORE(FN)	\
+  case BUILT_IN_TM_STORE_##FN:		\
+  case BUILT_IN_TM_STORE_WAR_##FN:	\
+  case BUILT_IN_TM_STORE_WAW_##FN
+
  /* Nonzero in a FUNCTION_DECL that should be always inlined by the inliner
     disregarding size and cost heuristics.  This is equivalent to using
     the always_inline attribute without the required diagnostics if the
@@ -3539,8 +3581,9 @@  struct GTY(()) tree_function_decl {
    unsigned pure_flag : 1;
    unsigned looping_const_or_pure_flag : 1;
    unsigned has_debug_args_flag : 1;
+  unsigned tm_clone_flag : 1;

-  /* 2 bits left */
+  /* 1 bit left */
  };

  /* The source language of the translation-unit.  */
@@ -5174,6 +5217,8 @@  extern void expand_return (tree);

  /* In tree-eh.c */
  extern void using_eh_for_cleanups (void);
+extern int struct_ptr_eq (const void *, const void *);
+extern hashval_t struct_ptr_hash (const void *);

  /* In fold-const.c */

@@ -5543,6 +5588,8 @@  extern tree build_duplicate_type (tree);
  #define ECF_NOVOPS		  (1 << 9)
  /* The function does not lead to calls within current function unit.  */
  #define ECF_LEAF		  (1 << 10)
+/* Nonzero if this call performs a transactional memory operation.  */
+#define ECF_TM_OPS		  (1 << 11)

  extern int flags_from_decl_or_type (const_tree);
  extern int call_expr_flags (const_tree);
@@ -5593,6 +5640,8 @@  extern void init_attributes (void);
     a decl attribute to the declaration rather than to its type).  */
  extern tree decl_attributes (tree *, tree, int);

+extern void apply_tm_attr (tree, tree);
+
  /* In integrate.c */
  extern void set_decl_abstract_flags (tree, int);
  extern void set_decl_origin_self (tree);
@@ -5805,6 +5854,21 @@  extern unsigned HOST_WIDE_INT compute_bu
  extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree);
  extern tree build_personality_function (const char *);

+/* In trans-mem.c.  */
+extern tree build_tm_abort_call (location_t, bool);
+extern bool is_tm_safe (const_tree);
+extern bool is_tm_pure (const_tree);
+extern bool is_tm_may_cancel_outer (tree);
+extern bool is_tm_ending_fndecl (tree);
+extern void record_tm_replacement (tree, tree);
+extern void tm_malloc_replacement (tree);
+
+static inline bool
+is_tm_safe_or_pure (tree x)
+{
+  return is_tm_safe (x) || is_tm_pure (x);
+}
+
  /* In tree-inline.c.  */

  void init_inline_once (void);
Index: gcc/attribs.c
===================================================================
--- gcc/attribs.c	(.../trunk)	(revision 180744)
+++ gcc/attribs.c	(.../branches/transactional-memory)	(revision 180773)
@@ -166,7 +166,8 @@  init_attributes (void)
  	  gcc_assert (strcmp (attribute_tables[i][j].name,
  			      attribute_tables[i][k].name));
      }
-  /* Check that no name occurs in more than one table.  */
+  /* Check that no name occurs in more than one table.  Names that
+     begin with '*' are exempt, and may be overridden.  */
    for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
      {
        size_t j, k, l;
@@ -174,8 +175,9 @@  init_attributes (void)
        for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
  	for (k = 0; attribute_tables[i][k].name != NULL; k++)
  	  for (l = 0; attribute_tables[j][l].name != NULL; l++)
-	    gcc_assert (strcmp (attribute_tables[i][k].name,
-				attribute_tables[j][l].name));
+	    gcc_assert (attribute_tables[i][k].name[0] == '*'
+			|| strcmp (attribute_tables[i][k].name,
+				   attribute_tables[j][l].name));
      }
  #endif

@@ -207,7 +209,7 @@  register_attribute (const struct attribu
    slot = htab_find_slot_with_hash (attribute_hash, &str,
  				   substring_hash (str.str, str.length),
  				   INSERT);
-  gcc_assert (!*slot);
+  gcc_assert (!*slot || attr->name[0] == '*');
    *slot = (void *) CONST_CAST (struct attribute_spec *, attr);
  }

@@ -484,3 +486,12 @@  decl_attributes (tree *node, tree attrib

    return returned_attrs;
  }
+
+/* Subroutine of set_method_tm_attributes.  Apply TM attribute ATTR
+   to the method FNDECL.  */
+
+void
+apply_tm_attr (tree fndecl, tree attr)
+{
+  decl_attributes (&TREE_TYPE (fndecl), tree_cons (attr, NULL, NULL), 0);
+}
Index: gcc/targhooks.c
===================================================================
--- gcc/targhooks.c	(.../trunk)	(revision 180744)
+++ gcc/targhooks.c	(.../branches/transactional-memory)	(revision 180773)
@@ -1214,6 +1214,12 @@  default_have_conditional_execution (void
  #endif
  }

+tree
+default_builtin_tm_load_store (tree ARG_UNUSED (type))
+{
+  return NULL_TREE;
+}
+
  /* Compute cost of moving registers to/from memory.  */

  int
Index: gcc/targhooks.h
===================================================================
--- gcc/targhooks.h	(.../trunk)	(revision 180744)
+++ gcc/targhooks.h	(.../branches/transactional-memory)	(revision 180773)
@@ -152,6 +152,9 @@  extern bool default_addr_space_subset_p
  extern rtx default_addr_space_convert (rtx, tree, tree);
  extern unsigned int default_case_values_threshold (void);
  extern bool default_have_conditional_execution (void);
+
+extern tree default_builtin_tm_load_store (tree);
+
  extern int default_memory_move_cost (enum machine_mode, reg_class_t, 
bool);
  extern int default_register_move_cost (enum machine_mode, reg_class_t,
  				       reg_class_t);
Index: gcc/gimple.def
===================================================================
--- gcc/gimple.def	(.../trunk)	(revision 180744)
+++ gcc/gimple.def	(.../branches/transactional-memory)	(revision 180773)
@@ -124,6 +124,14 @@  DEFGSCODE(GIMPLE_ASM, "gimple_asm", GSS_
      CHAIN is the optional static chain link for nested functions.  */
  DEFGSCODE(GIMPLE_CALL, "gimple_call", GSS_CALL)

+/* GIMPLE_TRANSACTION <BODY, LABEL> represents __transaction_atomic and
+   __transaction_relaxed blocks.
+   BODY is the sequence of statements inside the transaction.
+   LABEL is a label for the statement immediately following the
+   transaction.  This is before RETURN so that it has MEM_OPS,
+   so that it can clobber global memory.  */
+DEFGSCODE(GIMPLE_TRANSACTION, "gimple_transaction", GSS_TRANSACTION)
+
  /* GIMPLE_RETURN <RETVAL> represents return statements.

     RETVAL is the value to return or NULL.  If a value is returned it
@@ -151,6 +159,12 @@  DEFGSCODE(GIMPLE_EH_FILTER, "gimple_eh_f
     be invoked if an exception propagates to this point.  */
  DEFGSCODE(GIMPLE_EH_MUST_NOT_THROW, "gimple_eh_must_not_throw", 
GSS_EH_MNT)

+/* GIMPLE_EH_ELSE <N_BODY, E_BODY> must be the sole contents of
+   a GIMPLE_TRY_FINALLY node.  For all normal exits from the try block,
+   we N_BODY is run; for all exception exits from the try block,
+   E_BODY is run.  */
+DEFGSCODE(GIMPLE_EH_ELSE, "gimple_eh_else", GSS_EH_ELSE)
+
  /* GIMPLE_RESX resumes execution after an exception.  */
  DEFGSCODE(GIMPLE_RESX, "gimple_resx", GSS_EH_CTRL)

Index: gcc/builtin-types.def
===================================================================
--- gcc/builtin-types.def	(.../trunk)	(revision 180744)
+++ gcc/builtin-types.def	(.../branches/transactional-memory)	(revision 
180773)
@@ -477,3 +477,24 @@  DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_INT_I
  DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR)
  DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE,
  		     BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE)
+
+
+DEF_FUNCTION_TYPE_1 (BT_FN_I1_VPTR, BT_I1, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I2_VPTR, BT_I2, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I4_VPTR, BT_I4, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I8_VPTR, BT_I8, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_VPTR, BT_FLOAT, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_VPTR, BT_DOUBLE, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_LDOUBLE_VPTR, BT_LONGDOUBLE, BT_VOLATILE_PTR)
+
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I1, BT_VOID, BT_VOLATILE_PTR, BT_I1)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I2, BT_VOID, BT_VOLATILE_PTR, BT_I2)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I4, BT_VOID, BT_VOLATILE_PTR, BT_I4)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I8, BT_VOID, BT_VOLATILE_PTR, BT_I8)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_FLOAT, BT_VOID, BT_VOLATILE_PTR, 
BT_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_DOUBLE, BT_VOID,
+                     BT_VOLATILE_PTR, BT_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_LDOUBLE, BT_VOID,
+		     BT_VOLATILE_PTR, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_SIZE, BT_VOID,
+		     BT_VOLATILE_PTR, BT_SIZE)
Index: gcc/builtins.def
===================================================================
--- gcc/builtins.def	(.../trunk)	(revision 180744)
+++ gcc/builtins.def	(.../branches/transactional-memory)	(revision 180773)
@@ -142,6 +142,13 @@  along with GCC; see the file COPYING3.
                 false, true, true, ATTRS, false, \
  	       (flag_openmp || flag_tree_parallelize_loops))

+/* Builtin used by the implementation of GNU TM.  These
+   functions are mapped to the actual implementation of the STM library. */
+#undef DEF_TM_BUILTIN
+#define DEF_TM_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE,    \
+               false, true, true, ATTRS, false, flag_tm)
+
  /* Define an attribute list for math functions that are normally
     "impure" because some of them may write into global memory for
     `errno'.  If !flag_errno_math they are instead "const".  */
@@ -624,6 +631,7 @@  DEF_GCC_BUILTIN        (BUILT_IN_APPLY_A
  DEF_GCC_BUILTIN        (BUILT_IN_BSWAP32, "bswap32", 
BT_FN_UINT32_UINT32, ATTR_CONST_NOTHROW_LEAF_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_BSWAP64, "bswap64", 
BT_FN_UINT64_UINT64, ATTR_CONST_NOTHROW_LEAF_LIST)
  DEF_EXT_LIB_BUILTIN    (BUILT_IN_CLEAR_CACHE, "__clear_cache", 
BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
+/* [trans-mem]: Adjust BUILT_IN_TM_CALLOC if BUILT_IN_CALLOC is 
changed.  */
  DEF_LIB_BUILTIN        (BUILT_IN_CALLOC, "calloc", 
BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_NOTHROW_LEAF_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_CLASSIFY_TYPE, "classify_type", 
BT_FN_INT_VAR, ATTR_LEAF_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_CLZ, "clz", BT_FN_INT_UINT, 
ATTR_CONST_NOTHROW_LEAF_LIST)
@@ -662,6 +670,7 @@  DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSL, "
  DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, 
ATTR_CONST_NOTHROW_LEAF_LIST)
  DEF_EXT_LIB_BUILTIN        (BUILT_IN_FORK, "fork", BT_FN_PID, 
ATTR_NOTHROW_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_FRAME_ADDRESS, "frame_address", 
BT_FN_PTR_UINT, ATTR_NULL)
+/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed.  */
  DEF_LIB_BUILTIN        (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, 
ATTR_NOTHROW_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", 
BT_FN_PTR_PTR, ATTR_NULL)
  DEF_EXT_LIB_BUILTIN    (BUILT_IN_GETTEXT, "gettext", 
BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1)
@@ -698,6 +707,7 @@  DEF_GCC_BUILTIN        (BUILT_IN_ISUNORD
  DEF_LIB_BUILTIN        (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, 
ATTR_CONST_NOTHROW_LEAF_LIST)
  DEF_C99_BUILTIN        (BUILT_IN_LLABS, "llabs", 
BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_LONGJMP, "longjmp", 
BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+/* [trans-mem]: Adjust BUILT_IN_TM_MALLOC if BUILT_IN_MALLOC is 
changed.  */
  DEF_LIB_BUILTIN        (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, 
ATTR_MALLOC_NOTHROW_LEAF_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, 
ATTR_LEAF_LIST)
  DEF_GCC_BUILTIN        (BUILT_IN_PARITY, "parity", BT_FN_INT_UINT, 
ATTR_CONST_NOTHROW_LEAF_LIST)
@@ -793,3 +803,6 @@  DEF_BUILTIN_STUB (BUILT_IN_EH_COPY_VALUE

  /* OpenMP builtins.  */
  #include "omp-builtins.def"
+
+/* GTM builtins. */
+#include "gtm-builtins.def"
Index: gcc/gimple-low.c
===================================================================
--- gcc/gimple-low.c	(.../trunk)	(revision 180744)
+++ gcc/gimple-low.c	(.../branches/transactional-memory)	(revision 180773)
@@ -396,6 +396,11 @@  lower_stmt (gimple_stmt_iterator *gsi, s
        lower_sequence (gimple_eh_filter_failure (stmt), data);
        break;

+    case GIMPLE_EH_ELSE:
+      lower_sequence (gimple_eh_else_n_body (stmt), data);
+      lower_sequence (gimple_eh_else_e_body (stmt), data);
+      break;
+
      case GIMPLE_NOP:
      case GIMPLE_ASM:
      case GIMPLE_ASSIGN:
@@ -446,6 +451,10 @@  lower_stmt (gimple_stmt_iterator *gsi, s
        data->cannot_fallthru = false;
        return;

+    case GIMPLE_TRANSACTION:
+      lower_sequence (gimple_transaction_body (stmt), data);
+      break;
+
      default:
        gcc_unreachable ();
      }
@@ -727,6 +736,10 @@  gimple_stmt_may_fallthru (gimple stmt)
        return (gimple_seq_may_fallthru (gimple_try_eval (stmt))
  	      && gimple_seq_may_fallthru (gimple_try_cleanup (stmt)));

+    case GIMPLE_EH_ELSE:
+      return (gimple_seq_may_fallthru (gimple_eh_else_n_body (stmt))
+	      || gimple_seq_may_fallthru (gimple_eh_else_e_body (stmt)));
+
      case GIMPLE_CALL:
        /* Functions that do not return do not fall through.  */
        return (gimple_call_flags (stmt) & ECF_NORETURN) == 0;
Index: gcc/gsstruct.def
===================================================================
--- gcc/gsstruct.def	(.../trunk)	(revision 180744)
+++ gcc/gsstruct.def	(.../branches/transactional-memory)	(revision 180773)
@@ -38,6 +38,7 @@  DEFGSSTRUCT(GSS_CATCH, gimple_statement_
  DEFGSSTRUCT(GSS_EH_FILTER, gimple_statement_eh_filter, false)
  DEFGSSTRUCT(GSS_EH_MNT, gimple_statement_eh_mnt, false)
  DEFGSSTRUCT(GSS_EH_CTRL, gimple_statement_eh_ctrl, false)
+DEFGSSTRUCT(GSS_EH_ELSE, gimple_statement_eh_else, false)
  DEFGSSTRUCT(GSS_WCE, gimple_statement_wce, false)
  DEFGSSTRUCT(GSS_OMP, gimple_statement_omp, false)
  DEFGSSTRUCT(GSS_OMP_CRITICAL, gimple_statement_omp_critical, false)
@@ -49,3 +50,4 @@  DEFGSSTRUCT(GSS_OMP_SINGLE, gimple_state
  DEFGSSTRUCT(GSS_OMP_CONTINUE, gimple_statement_omp_continue, false)
  DEFGSSTRUCT(GSS_OMP_ATOMIC_LOAD, gimple_statement_omp_atomic_load, false)
  DEFGSSTRUCT(GSS_OMP_ATOMIC_STORE, gimple_statement_omp_atomic_store, 
false)
+DEFGSSTRUCT(GSS_TRANSACTION, gimple_statement_transaction, false)
Index: gcc/tree-eh.c
===================================================================
--- gcc/tree-eh.c	(.../trunk)	(revision 180744)
+++ gcc/tree-eh.c	(.../branches/transactional-memory)	(revision 180773)
@@ -58,7 +58,7 @@  using_eh_for_cleanups (void)
     pointer.  Assumes all pointers are interchangeable, which is sort
     of already assumed by gcc elsewhere IIRC.  */

-static int
+int
  struct_ptr_eq (const void *a, const void *b)
  {
    const void * const * x = (const void * const *) a;
@@ -66,7 +66,7 @@  struct_ptr_eq (const void *a, const void
    return *x == *y;
  }

-static hashval_t
+hashval_t
  struct_ptr_hash (const void *a)
  {
    const void * const * x = (const void * const *) a;
@@ -284,6 +284,11 @@  collect_finally_tree (gimple stmt, gimpl
        collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
        break;

+    case GIMPLE_EH_ELSE:
+      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
+      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
+      break;
+
      default:
        /* A type, a decl, or some kind of statement that we're not
  	 interested in.  Don't walk them.  */
@@ -534,6 +539,10 @@  replace_goto_queue_1 (gimple stmt, struc
      case GIMPLE_EH_FILTER:
        replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
        break;
+    case GIMPLE_EH_ELSE:
+      replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
+      replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
+      break;

      default:
        /* These won't have gotos in them.  */
@@ -921,6 +930,21 @@  lower_try_finally_fallthru_label (struct
    return label;
  }

+/* A subroutine of lower_try_finally.  If FINALLY consits of a
+   GIMPLE_EH_ELSE node, return it.  */
+
+static inline gimple
+get_eh_else (gimple_seq finally)
+{
+  gimple x = gimple_seq_first_stmt (finally);
+  if (gimple_code (x) == GIMPLE_EH_ELSE)
+    {
+      gcc_assert (gimple_seq_singleton_p (finally));
+      return x;
+    }
+  return NULL;
+}
+
  /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
     langhook returns non-null, then the language requires that the 
exception
     path out of a try_finally be treated specially.  To wit: the code 
within
@@ -950,7 +974,7 @@  honor_protect_cleanup_actions (struct le
    gimple_stmt_iterator gsi;
    bool finally_may_fallthru;
    gimple_seq finally;
-  gimple x;
+  gimple x, eh_else;

    /* First check for nothing to do.  */
    if (lang_hooks.eh_protect_cleanup_actions == NULL)
@@ -960,12 +984,18 @@  honor_protect_cleanup_actions (struct le
      return;

    finally = gimple_try_cleanup (tf->top_p);
-  finally_may_fallthru = gimple_seq_may_fallthru (finally);
+  eh_else = get_eh_else (finally);

    /* Duplicate the FINALLY block.  Only need to do this for try-finally,
-     and not for cleanups.  */
-  if (this_state)
+     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
+  if (eh_else)
+    {
+      finally = gimple_eh_else_e_body (eh_else);
+      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
+    }
+  else if (this_state)
      finally = lower_try_finally_dup_block (finally, outer_state);
+  finally_may_fallthru = gimple_seq_may_fallthru (finally);

    /* If this cleanup consists of a TRY_CATCH_EXPR with 
TRY_CATCH_IS_CLEANUP
       set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
@@ -1011,7 +1041,7 @@  lower_try_finally_nofallthru (struct leh
  			      struct leh_tf_state *tf)
  {
    tree lab;
-  gimple x;
+  gimple x, eh_else;
    gimple_seq finally;
    struct goto_queue_node *q, *qe;

@@ -1034,15 +1064,35 @@  lower_try_finally_nofallthru (struct leh

    replace_goto_queue (tf);

-  lower_eh_constructs_1 (state, finally);
-  gimple_seq_add_seq (&tf->top_p_seq, finally);
+  /* Emit the finally block into the stream.  Lower EH_ELSE at this 
time.  */
+  eh_else = get_eh_else (finally);
+  if (eh_else)
+    {
+      finally = gimple_eh_else_n_body (eh_else);
+      lower_eh_constructs_1 (state, finally);
+      gimple_seq_add_seq (&tf->top_p_seq, finally);

-  if (tf->may_throw)
+      if (tf->may_throw)
+	{
+	  finally = gimple_eh_else_e_body (eh_else);
+	  lower_eh_constructs_1 (state, finally);
+
+	  emit_post_landing_pad (&eh_seq, tf->region);
+	  gimple_seq_add_seq (&eh_seq, finally);
+	}
+    }
+  else
      {
-      emit_post_landing_pad (&eh_seq, tf->region);
+      lower_eh_constructs_1 (state, finally);
+      gimple_seq_add_seq (&tf->top_p_seq, finally);

-      x = gimple_build_goto (lab);
-      gimple_seq_add_stmt (&eh_seq, x);
+      if (tf->may_throw)
+	{
+	  emit_post_landing_pad (&eh_seq, tf->region);
+
+	  x = gimple_build_goto (lab);
+	  gimple_seq_add_stmt (&eh_seq, x);
+	}
      }
  }

@@ -1062,6 +1112,18 @@  lower_try_finally_onedest (struct leh_st
    finally = gimple_try_cleanup (tf->top_p);
    tf->top_p_seq = gimple_try_eval (tf->top_p);

+  /* Since there's only one destination, and the destination edge can only
+     either be EH or non-EH, that implies that all of our incoming edges
+     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
+  x = get_eh_else (finally);
+  if (x)
+    {
+      if (tf->may_throw)
+        finally = gimple_eh_else_e_body (x);
+      else
+        finally = gimple_eh_else_n_body (x);
+    }
+
    lower_eh_constructs_1 (state, finally);

    if (tf->may_throw)
@@ -1132,11 +1194,18 @@  lower_try_finally_copy (struct leh_state
    gimple_seq finally;
    gimple_seq new_stmt;
    gimple_seq seq;
-  gimple x;
+  gimple x, eh_else;
    tree tmp;
    location_t tf_loc = gimple_location (tf->try_finally_expr);

    finally = gimple_try_cleanup (tf->top_p);
+
+  /* Notice EH_ELSE, and simplify some of the remaining code
+     by considering FINALLY to be the normal return path only.  */
+  eh_else = get_eh_else (finally);
+  if (eh_else)
+    finally = gimple_eh_else_n_body (eh_else);
+
    tf->top_p_seq = gimple_try_eval (tf->top_p);
    new_stmt = NULL;

@@ -1153,7 +1222,12 @@  lower_try_finally_copy (struct leh_state

    if (tf->may_throw)
      {
-      seq = lower_try_finally_dup_block (finally, state);
+      /* We don't need to copy the EH path of EH_ELSE,
+	 since it is only emitted once.  */
+      if (eh_else)
+        seq = gimple_eh_else_e_body (eh_else);
+      else
+        seq = lower_try_finally_dup_block (finally, state);
        lower_eh_constructs_1 (state, seq);

        emit_post_landing_pad (&eh_seq, tf->region);
@@ -1252,7 +1326,7 @@  lower_try_finally_switch (struct leh_sta
    tree last_case;
    VEC (tree,heap) *case_label_vec;
    gimple_seq switch_body;
-  gimple x;
+  gimple x, eh_else;
    tree tmp;
    gimple switch_stmt;
    gimple_seq finally;
@@ -1263,9 +1337,10 @@  lower_try_finally_switch (struct leh_sta
    location_t finally_loc;

    switch_body = gimple_seq_alloc ();
+  finally = gimple_try_cleanup (tf->top_p);
+  eh_else = get_eh_else (finally);

    /* Mash the TRY block to the head of the chain.  */
-  finally = gimple_try_cleanup (tf->top_p);
    tf->top_p_seq = gimple_try_eval (tf->top_p);

    /* The location of the finally is either the last stmt in the finally
@@ -1281,7 +1356,7 @@  lower_try_finally_switch (struct leh_sta
    nlabels = VEC_length (tree, tf->dest_array);
    return_index = nlabels;
    eh_index = return_index + tf->may_return;
-  fallthru_index = eh_index + tf->may_throw;
+  fallthru_index = eh_index + (tf->may_throw && !eh_else);
    ndests = fallthru_index + tf->may_fallthru;

    finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
@@ -1319,7 +1394,23 @@  lower_try_finally_switch (struct leh_sta
        gimple_seq_add_stmt (&switch_body, x);
      }

-  if (tf->may_throw)
+  /* For EH_ELSE, emit the exception path (plus resx) now, then
+     subsequently we only need consider the normal path.  */
+  if (eh_else)
+    {
+      if (tf->may_throw)
+	{
+	  finally = gimple_eh_else_e_body (eh_else);
+	  lower_eh_constructs_1 (state, finally);
+
+	  emit_post_landing_pad (&eh_seq, tf->region);
+	  gimple_seq_add_seq (&eh_seq, finally);
+	  emit_resx (&eh_seq, tf->region);
+	}
+
+      finally = gimple_eh_else_n_body (eh_else);
+    }
+  else if (tf->may_throw)
      {
        emit_post_landing_pad (&eh_seq, tf->region);

@@ -1452,12 +1543,22 @@  lower_try_finally_switch (struct leh_sta
     the estimate of the size of the switch machinery we'd have to add.  */

  static bool
-decide_copy_try_finally (int ndests, gimple_seq finally)
+decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
  {
    int f_estimate, sw_estimate;
+  gimple eh_else;
+
+  /* If there's an EH_ELSE involved, the exception path is separate
+     and really doesn't come into play for this computation.  */
+  eh_else = get_eh_else (finally);
+  if (eh_else)
+    {
+      ndests -= may_throw;
+      finally = gimple_eh_else_n_body (eh_else);
+    }

    if (!optimize)
-    return false;
+    return ndests == 1;

    /* Finally estimate N times, plus N gotos.  */
    f_estimate = count_insns_seq (finally, &eni_size_weights);
@@ -1563,7 +1664,8 @@  lower_try_finally (struct leh_state *sta
    /* We can easily special-case redirection to a single destination.  */
    else if (ndests == 1)
      lower_try_finally_onedest (state, &this_tf);
-  else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
+  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
+				    gimple_try_cleanup (tp)))
      lower_try_finally_copy (state, &this_tf);
    else
      lower_try_finally_switch (state, &this_tf);
@@ -1928,6 +2030,9 @@  lower_eh_constructs_2 (struct leh_state
  		case GIMPLE_EH_MUST_NOT_THROW:
  		    replace = lower_eh_must_not_throw (state, stmt);
  		    break;
+		case GIMPLE_EH_ELSE:
+		    /* This code is only valid with GIMPLE_TRY_FINALLY.  */
+		    gcc_unreachable ();
  		default:
  		    replace = lower_cleanup (state, stmt);
  		    break;
@@ -1942,6 +2047,10 @@  lower_eh_constructs_2 (struct leh_state
        /* Return since we don't want gsi_next () */
        return;

+    case GIMPLE_EH_ELSE:
+      /* We should be eliminating this in lower_try_finally et al.  */
+      gcc_unreachable ();
+
      default:
        /* A type, a decl, or some kind of statement that we're not
  	 interested in.  Don't walk them.  */
@@ -2832,6 +2941,10 @@  refactor_eh_r (gimple_seq seq)
  	  case GIMPLE_EH_FILTER:
  	    refactor_eh_r (gimple_eh_filter_failure (one));
  	    break;
+	  case GIMPLE_EH_ELSE:
+	    refactor_eh_r (gimple_eh_else_n_body (one));
+	    refactor_eh_r (gimple_eh_else_e_body (one));
+	    break;
  	  default:
  	    break;
  	  }
Index: gcc/gimple-pretty-print.c
===================================================================
--- gcc/gimple-pretty-print.c	(.../trunk)	(revision 180744)
+++ gcc/gimple-pretty-print.c	(.../branches/transactional-memory) 
(revision 180773)
@@ -33,6 +33,7 @@  along with GCC; see the file COPYING3.
  #include "tree-pass.h"
  #include "gimple.h"
  #include "value-prof.h"
+#include "trans-mem.h"

  #define INDENT(SPACE)							\
    do { int i; for (i = 0; i < SPACE; i++) pp_space (buffer); } while (0)
@@ -162,6 +163,7 @@  debug_gimple_seq (gimple_seq seq)
       'd' - outputs an int as a decimal,
       's' - outputs a string,
       'n' - outputs a newline,
+     'x' - outputs an int as hexadecimal,
       '+' - increases indent by 2 then outputs a newline,
       '-' - decreases indent by 2 then outputs a newline.   */

@@ -216,6 +218,10 @@  dump_gimple_fmt (pretty_printer *buffer,
                  newline_and_indent (buffer, spc);
                  break;

+              case 'x':
+                pp_scalar (buffer, "%x", va_arg (args, int));
+                break;
+
                case '+':
                  spc += 2;
                  newline_and_indent (buffer, spc);
@@ -622,6 +628,7 @@  static void
  dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags)
  {
    tree lhs = gimple_call_lhs (gs);
+  tree fn = gimple_call_fn (gs);

    if (flags & TDF_ALIAS)
      {
@@ -648,8 +655,7 @@  dump_gimple_call (pretty_printer *buffer
  	dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T", gs,
  			 internal_fn_name (gimple_call_internal_fn (gs)), lhs);
        else
-	dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T",
-			 gs, gimple_call_fn (gs), lhs);
+	dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, fn, lhs);
        if (gimple_call_num_args (gs) > 0)
          {
            pp_string (buffer, ", ");
@@ -672,7 +678,7 @@  dump_gimple_call (pretty_printer *buffer
        if (gimple_call_internal_p (gs))
  	pp_string (buffer, internal_fn_name (gimple_call_internal_fn (gs)));
        else
-	print_call_name (buffer, gimple_call_fn (gs), flags);
+	print_call_name (buffer, fn, flags);
        pp_string (buffer, " (");
        dump_gimple_call_args (buffer, gs, flags);
        pp_character (buffer, ')');
@@ -689,9 +695,63 @@  dump_gimple_call (pretty_printer *buffer

    if (gimple_call_return_slot_opt_p (gs))
      pp_string (buffer, " [return slot optimization]");
-
    if (gimple_call_tail_p (gs))
      pp_string (buffer, " [tail call]");
+
+  /* Dump the arguments of _ITM_beginTransaction sanely.  */
+  if (TREE_CODE (fn) == ADDR_EXPR)
+    fn = TREE_OPERAND (fn, 0);
+  if (TREE_CODE (fn) == FUNCTION_DECL && DECL_IS_TM_CLONE (fn))
+    pp_string (buffer, " [tm-clone]");
+  if (TREE_CODE (fn) == FUNCTION_DECL
+      && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
+      && DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_START
+      /* Check we're referring to Intel's TM specifications.  */
+      && !strcmp (IDENTIFIER_POINTER (DECL_NAME (fn)),
+		  "__builtin__ITM_beginTransaction")
+      && gimple_call_num_args (gs) > 0)
+    {
+      tree t = gimple_call_arg (gs, 0);
+      unsigned HOST_WIDE_INT props;
+      gcc_assert (TREE_CODE (t) == INTEGER_CST);
+
+      pp_string (buffer, " [ ");
+
+      /* Get the transaction code properties.  */
+      props = TREE_INT_CST_LOW (t);
+
+      if (props & PR_INSTRUMENTEDCODE)
+	pp_string (buffer, "instrumentedCode ");
+      if (props & PR_UNINSTRUMENTEDCODE)
+	pp_string (buffer, "uninstrumentedCode ");
+      if (props & PR_HASNOXMMUPDATE)
+	pp_string (buffer, "hasNoXMMUpdate ");
+      if (props & PR_HASNOABORT)
+	pp_string (buffer, "hasNoAbort ");
+      if (props & PR_HASNOIRREVOCABLE)
+	pp_string (buffer, "hasNoIrrevocable ");
+      if (props & PR_DOESGOIRREVOCABLE)
+	pp_string (buffer, "doesGoIrrevocable ");
+      if (props & PR_HASNOSIMPLEREADS)
+	pp_string (buffer, "hasNoSimpleReads ");
+      if (props & PR_AWBARRIERSOMITTED)
+	pp_string (buffer, "awBarriersOmitted ");
+      if (props & PR_RARBARRIERSOMITTED)
+	pp_string (buffer, "RaRBarriersOmitted ");
+      if (props & PR_UNDOLOGCODE)
+	pp_string (buffer, "undoLogCode ");
+      if (props & PR_PREFERUNINSTRUMENTED)
+	pp_string (buffer, "preferUninstrumented ");
+      if (props & PR_EXCEPTIONBLOCK)
+	pp_string (buffer, "exceptionBlock ");
+      if (props & PR_HASELSE)
+	pp_string (buffer, "hasElse ");
+      if (props & PR_READONLY)
+	pp_string (buffer, "readOnly ");
+
+      pp_string (buffer, "]");
+    }
  }


@@ -947,6 +1007,24 @@  dump_gimple_eh_must_not_throw (pretty_pr
  }


+/* Dump a GIMPLE_EH_ELSE tuple on the pretty_printer BUFFER, SPC spaces of
+   indent.  FLAGS specifies details to show in the dump (see TDF_* in
+   tree-pass.h).  */
+
+static void
+dump_gimple_eh_else (pretty_printer *buffer, gimple gs, int spc, int flags)
+{
+  if (flags & TDF_RAW)
+    dump_gimple_fmt (buffer, spc, flags,
+                     "%G <%+N_BODY <%S>%nE_BODY <%S>%->", gs,
+                     gimple_eh_else_n_body (gs), gimple_eh_else_e_body (gs));
+  else
+    dump_gimple_fmt (buffer, spc, flags,
+                    "<<<if_normal_exit>>>%+{%S}%-<<<else_eh_exit>>>%+{%S}",
+                     gimple_eh_else_n_body (gs), gimple_eh_else_e_body (gs));
+}
+
+
  /* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of
     indent.  FLAGS specifies details to show in the dump (see TDF_* in
     tree-pass.h).  */
@@ -1269,6 +1347,86 @@  dump_gimple_omp_return (pretty_printer *
      }
  }

+/* Dump a GIMPLE_TRANSACTION tuple on the pretty_printer BUFFER.  */
+
+static void
+dump_gimple_transaction (pretty_printer *buffer, gimple gs, int spc, int flags)
+{
+  unsigned subcode = gimple_transaction_subcode (gs);
+
+  if (flags & TDF_RAW)
+    {
+      dump_gimple_fmt (buffer, spc, flags,
+		       "%G [SUBCODE=%x,LABEL=%T] <%+BODY <%S> >",
+		       gs, subcode, gimple_transaction_label (gs),
+		       gimple_transaction_body (gs));
+    }
+  else
+    {
+      if (subcode & GTMA_IS_OUTER)
+	pp_string (buffer, "__transaction_atomic [[outer]]");
+      else if (subcode & GTMA_IS_RELAXED)
+	pp_string (buffer, "__transaction_relaxed");
+      else
+	pp_string (buffer, "__transaction_atomic");
+      subcode &= ~GTMA_DECLARATION_MASK;
+
+      if (subcode || gimple_transaction_label (gs))
+	{
+	  pp_string (buffer, "  //");
+	  if (gimple_transaction_label (gs))
+	    {
+	      pp_string (buffer, " LABEL=");
+	      dump_generic_node (buffer, gimple_transaction_label (gs),
+				 spc, flags, false);
+	    }
+	  if (subcode)
+	    {
+	      pp_string (buffer, " SUBCODE=[ ");
+	      if (subcode & GTMA_HAVE_ABORT)
+		{
+		  pp_string (buffer, "GTMA_HAVE_ABORT ");
+		  subcode &= ~GTMA_HAVE_ABORT;
+		}
+	      if (subcode & GTMA_HAVE_LOAD)
+		{
+		  pp_string (buffer, "GTMA_HAVE_LOAD ");
+		  subcode &= ~GTMA_HAVE_LOAD;
+		}
+	      if (subcode & GTMA_HAVE_STORE)
+		{
+		  pp_string (buffer, "GTMA_HAVE_STORE ");
+		  subcode &= ~GTMA_HAVE_STORE;
+		}
+	      if (subcode & GTMA_MAY_ENTER_IRREVOCABLE)
+		{
+		  pp_string (buffer, "GTMA_MAY_ENTER_IRREVOCABLE ");
+		  subcode &= ~GTMA_MAY_ENTER_IRREVOCABLE;
+		}
+	      if (subcode & GTMA_DOES_GO_IRREVOCABLE)
+		{
+		  pp_string (buffer, "GTMA_DOES_GO_IRREVOCABLE ");
+		  subcode &= ~GTMA_DOES_GO_IRREVOCABLE;
+		}
+	      if (subcode)
+		pp_printf (buffer, "0x%x ", subcode);
+	      pp_string (buffer, "]");
+	    }
+	}
+
+      if (!gimple_seq_empty_p (gimple_transaction_body (gs)))
+	{
+	  newline_and_indent (buffer, spc + 2);
+	  pp_character (buffer, '{');
+	  pp_newline (buffer);
+	  dump_gimple_seq (buffer, gimple_transaction_body (gs),
+			   spc + 4, flags);
+	  newline_and_indent (buffer, spc + 2);
+	  pp_character (buffer, '}');
+	}
+    }
+}
+
  /* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of
     indent.  FLAGS specifies details to show in the dump (see TDF_* in
     tree-pass.h).  */
@@ -1855,6 +2013,10 @@  dump_gimple_stmt (pretty_printer *buffer
        dump_gimple_eh_must_not_throw (buffer, gs, spc, flags);
        break;

+    case GIMPLE_EH_ELSE:
+      dump_gimple_eh_else (buffer, gs, spc, flags);
+      break;
+
      case GIMPLE_RESX:
        dump_gimple_resx (buffer, gs, spc, flags);
        break;
@@ -1877,6 +2039,10 @@  dump_gimple_stmt (pretty_printer *buffer
        pp_string (buffer, " predictor.");
        break;

+    case GIMPLE_TRANSACTION:
+      dump_gimple_transaction (buffer, gs, spc, flags);
+      break;
+
      default:
        GIMPLE_NIY;
      }
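
For reference, a small sketch of how a TM pass might manipulate the
GTMA_* subcode bits that dump_gimple_transaction prints.  The helper is
hypothetical; gimple_transaction_subcode and
gimple_transaction_set_subcode are the accessors added elsewhere in this
series:

  /* Hypothetical example: record that TXN performs a store by or-ing
     GTMA_HAVE_STORE into its subcode.  */
  static void
  txn_note_store (gimple txn)
  {
    unsigned int subcode = gimple_transaction_subcode (txn);
    gimple_transaction_set_subcode (txn, subcode | GTMA_HAVE_STORE);
  }
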
Index: gcc/gimplify.c
===================================================================
--- gcc/gimplify.c	(.../trunk)	(revision 180744)
+++ gcc/gimplify.c	(.../branches/transactional-memory)	(revision 180773)
@@ -413,6 +413,8 @@  create_tmp_var_name (const char *prefix)
        char *preftmp = ASTRDUP (prefix);

        remove_suffix (preftmp, strlen (preftmp));
+      clean_symbol_name (preftmp);
+
        prefix = preftmp;
      }

@@ -1072,6 +1074,12 @@  voidify_wrapper_expr (tree wrapper, tree
  		}
  	      break;

+	    case TRANSACTION_EXPR:
+	      TREE_SIDE_EFFECTS (*p) = 1;
+	      TREE_TYPE (*p) = void_type_node;
+	      p = &TRANSACTION_EXPR_BODY (*p);
+	      break;
+
  	    default:
  	      goto out;
  	    }
@@ -6527,6 +6535,53 @@  gimplify_omp_atomic (tree *expr_p, gimpl
     return GS_ALL_DONE;
  }

+/* Gimplify a TRANSACTION_EXPR.  This involves gimplification of the
+   body, and adding some EH bits.  */
+
+static enum gimplify_status
+gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
+{
+  tree expr = *expr_p, temp, tbody = TRANSACTION_EXPR_BODY (expr);
+  gimple g;
+  gimple_seq body = NULL;
+  struct gimplify_ctx gctx;
+  int subcode = 0;
+
+  /* Wrap the transaction body in a BIND_EXPR so we have a context
+     in which to put decls for OpenMP.  */
+  if (TREE_CODE (tbody) != BIND_EXPR)
+    {
+      tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
+      TREE_SIDE_EFFECTS (bind) = 1;
+      SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
+      TRANSACTION_EXPR_BODY (expr) = bind;
+    }
+
+  push_gimplify_context (&gctx);
+  temp = voidify_wrapper_expr (*expr_p, NULL);
+
+  g = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
+  pop_gimplify_context (g);
+
+  g = gimple_build_transaction (body, NULL);
+  if (TRANSACTION_EXPR_OUTER (expr))
+    subcode = GTMA_IS_OUTER;
+  else if (TRANSACTION_EXPR_RELAXED (expr))
+    subcode = GTMA_IS_RELAXED;
+  gimple_transaction_set_subcode (g, subcode);
+
+  gimplify_seq_add_stmt (pre_p, g);
+
+  if (temp)
+    {
+      *expr_p = temp;
+      return GS_OK;
+    }
+
+  *expr_p = NULL_TREE;
+  return GS_ALL_DONE;
+}
+
  /* Convert the GENERIC expression tree *EXPR_P to GIMPLE.  If the
     expression produces a value to be used as an operand inside a GIMPLE
     statement, the value will be stored back in *EXPR_P.  This value will
@@ -7251,6 +7306,10 @@  gimplify_expr (tree *expr_p, gimple_seq
  	  ret = gimplify_omp_atomic (expr_p, pre_p);
  	  break;

+        case TRANSACTION_EXPR:
+          ret = gimplify_transaction (expr_p, pre_p);
+          break;
+
  	case TRUTH_AND_EXPR:
  	case TRUTH_OR_EXPR:
  	case TRUTH_XOR_EXPR:
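
For reference, a minimal user-level example (assuming the -fgnu-tm
option from this series) that exercises gimplify_transaction; since the
transaction is neither [[outer]] nor relaxed, the resulting
GIMPLE_TRANSACTION carries subcode 0:

  int counter;

  void
  bump (void)
  {
    __transaction_atomic { counter++; }
  }
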
Index: gcc/calls.c
===================================================================
--- gcc/calls.c	(.../trunk)	(revision 180744)
+++ gcc/calls.c	(.../branches/transactional-memory)	(revision 180773)
@@ -496,7 +496,60 @@  emit_call_1 (rtx funexp, tree fntree ATT
  static int
  special_function_p (const_tree fndecl, int flags)
  {
-  if (fndecl && DECL_NAME (fndecl)
+  if (fndecl == NULL)
+    return flags;
+
+  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+    {
+      switch (DECL_FUNCTION_CODE (fndecl))
+	{
+	case BUILT_IN_TM_COMMIT:
+	case BUILT_IN_TM_COMMIT_EH:
+	case BUILT_IN_TM_ABORT:
+	case BUILT_IN_TM_IRREVOCABLE:
+	case BUILT_IN_TM_GETTMCLONE_IRR:
+	case BUILT_IN_TM_MEMCPY:
+	case BUILT_IN_TM_MEMMOVE:
+        case BUILT_IN_TM_MEMSET:
+	CASE_BUILT_IN_TM_STORE (1):
+	CASE_BUILT_IN_TM_STORE (2):
+	CASE_BUILT_IN_TM_STORE (4):
+	CASE_BUILT_IN_TM_STORE (8):
+	CASE_BUILT_IN_TM_STORE (FLOAT):
+	CASE_BUILT_IN_TM_STORE (DOUBLE):
+	CASE_BUILT_IN_TM_STORE (LDOUBLE):
+	CASE_BUILT_IN_TM_STORE (M64):
+	CASE_BUILT_IN_TM_STORE (M128):
+	CASE_BUILT_IN_TM_STORE (M256):
+	CASE_BUILT_IN_TM_LOAD (1):
+	CASE_BUILT_IN_TM_LOAD (2):
+	CASE_BUILT_IN_TM_LOAD (4):
+	CASE_BUILT_IN_TM_LOAD (8):
+	CASE_BUILT_IN_TM_LOAD (FLOAT):
+	CASE_BUILT_IN_TM_LOAD (DOUBLE):
+	CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+	CASE_BUILT_IN_TM_LOAD (M64):
+	CASE_BUILT_IN_TM_LOAD (M128):
+	CASE_BUILT_IN_TM_LOAD (M256):
+	case BUILT_IN_TM_LOG:
+	case BUILT_IN_TM_LOG_1:
+	case BUILT_IN_TM_LOG_2:
+	case BUILT_IN_TM_LOG_4:
+	case BUILT_IN_TM_LOG_8:
+	case BUILT_IN_TM_LOG_FLOAT:
+	case BUILT_IN_TM_LOG_DOUBLE:
+	case BUILT_IN_TM_LOG_LDOUBLE:
+	case BUILT_IN_TM_LOG_M64:
+	case BUILT_IN_TM_LOG_M128:
+	case BUILT_IN_TM_LOG_M256:
+	  flags |= ECF_TM_OPS;
+	  break;
+	default:
+	  break;
+	}
+    }
+
+  if (DECL_NAME (fndecl)
        && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 17
        /* Exclude functions not at the file scope, or not `extern',
  	 since they are not the magic functions we would otherwise
@@ -644,6 +697,9 @@  flags_from_decl_or_type (const_tree exp)
        if (TREE_NOTHROW (exp))
  	flags |= ECF_NOTHROW;

+      if (DECL_IS_TM_CLONE (exp))
+	flags |= ECF_TM_OPS;
+
        flags = special_function_p (exp, flags);
      }
    else if (TYPE_P (exp) && TYPE_READONLY (exp))
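
A hypothetical consumer of the new ECF_TM_OPS flag, shown only to
illustrate the intent; ECF_TM_OPS itself is defined elsewhere in the
series, and flags_from_decl_or_type is the existing query:

  /* Hypothetical: true if FNDECL gets ECF_TM_OPS, i.e. it is one of
     the TM builtins above or a TM clone.  */
  static bool
  fndecl_is_tm_ops (tree fndecl)
  {
    return (flags_from_decl_or_type (fndecl) & ECF_TM_OPS) != 0;
  }
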
Index: gcc/tree-inline.c
===================================================================
--- gcc/tree-inline.c	(.../trunk)	(revision 180744)
+++ gcc/tree-inline.c	(.../branches/transactional-memory)	(revision 180773)
@@ -1365,6 +1365,12 @@  remap_gimple_stmt (gimple stmt, copy_bod
  	    = gimple_build_omp_critical (s1, gimple_omp_critical_name (stmt));
  	  break;

+	case GIMPLE_TRANSACTION:
+	  s1 = remap_gimple_seq (gimple_transaction_body (stmt), id);
+	  copy = gimple_build_transaction (s1, gimple_transaction_label (stmt));
+	  gimple_transaction_set_subcode (copy, gimple_transaction_subcode (stmt));
+	  break;
+
  	default:
  	  gcc_unreachable ();
  	}
@@ -3600,6 +3606,11 @@  estimate_num_insns (gimple stmt, eni_wei
        return (weights->omp_cost
                + estimate_num_insns_seq (gimple_omp_body (stmt), weights));

+    case GIMPLE_TRANSACTION:
+      return (weights->tm_cost
+	      + estimate_num_insns_seq (gimple_transaction_body (stmt),
+					weights));
+
      default:
        gcc_unreachable ();
      }
@@ -3639,6 +3650,7 @@  init_inline_once (void)
    eni_size_weights.target_builtin_call_cost = 1;
    eni_size_weights.div_mod_cost = 1;
    eni_size_weights.omp_cost = 40;
+  eni_size_weights.tm_cost = 10;
    eni_size_weights.time_based = false;
    eni_size_weights.return_cost = 1;

@@ -3650,6 +3662,7 @@  init_inline_once (void)
    eni_time_weights.target_builtin_call_cost = 1;
    eni_time_weights.div_mod_cost = 10;
    eni_time_weights.omp_cost = 40;
+  eni_time_weights.tm_cost = 40;
    eni_time_weights.time_based = true;
    eni_time_weights.return_cost = 2;
  }
Index: gcc/tree-inline.h
===================================================================
--- gcc/tree-inline.h	(.../trunk)	(revision 180744)
+++ gcc/tree-inline.h	(.../branches/transactional-memory)	(revision 180773)
@@ -144,6 +144,9 @@  typedef struct eni_weights_d
    /* Cost for omp construct.  */
    unsigned omp_cost;

+  /* Cost for tm transaction.  */
+  unsigned tm_cost;
+
    /* Cost of return.  */
    unsigned return_cost;

Index: gcc/gimple.c
===================================================================
--- gcc/gimple.c	(.../trunk)	(revision 180744)
+++ gcc/gimple.c	(.../branches/transactional-memory)	(revision 180773)
@@ -743,6 +743,17 @@  gimple_build_eh_must_not_throw (tree dec
    return p;
  }

+/* Build a GIMPLE_EH_ELSE statement.  */
+
+gimple
+gimple_build_eh_else (gimple_seq n_body, gimple_seq e_body)
+{
+  gimple p = gimple_alloc (GIMPLE_EH_ELSE, 0);
+  gimple_eh_else_set_n_body (p, n_body);
+  gimple_eh_else_set_e_body (p, e_body);
+  return p;
+}
+
  /* Build a GIMPLE_TRY statement.

     EVAL is the expression to evaluate.
@@ -1146,6 +1157,17 @@  gimple_build_omp_atomic_store (tree val)
    return p;
  }

+/* Build a GIMPLE_TRANSACTION statement.  */
+
+gimple
+gimple_build_transaction (gimple_seq body, tree label)
+{
+  gimple p = gimple_alloc (GIMPLE_TRANSACTION, 0);
+  gimple_transaction_set_body (p, body);
+  gimple_transaction_set_label (p, label);
+  return p;
+}
+
  /* Build a GIMPLE_PREDICT statement.  PREDICT is one of the predictors from
     predict.def, OUTCOME is NOT_TAKEN or TAKEN.  */

@@ -1331,7 +1353,7 @@  walk_gimple_seq (gimple_seq seq, walk_st
  {
    gimple_stmt_iterator gsi;

-  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
+  for (gsi = gsi_start (seq); !gsi_end_p (gsi); )
      {
        tree ret = walk_gimple_stmt (&gsi, callback_stmt, callback_op, wi);
        if (ret)
@@ -1340,8 +1362,12 @@  walk_gimple_seq (gimple_seq seq, walk_st
  	     to hold it.  */
  	  gcc_assert (wi);
  	  wi->callback_result = ret;
-	  return gsi_stmt (gsi);
+
+	  return wi->removed_stmt ? NULL : gsi_stmt (gsi);
  	}
+
+      if (!wi->removed_stmt)
+	gsi_next (&gsi);
      }

    if (wi)
@@ -1680,6 +1706,13 @@  walk_gimple_op (gimple stmt, walk_tree_f
  	return ret;
        break;

+    case GIMPLE_TRANSACTION:
+      ret = walk_tree (gimple_transaction_label_ptr (stmt), callback_op,
+		       wi, pset);
+      if (ret)
+	return ret;
+      break;
+
        /* Tuples that do not have operands.  */
      case GIMPLE_NOP:
      case GIMPLE_RESX:
@@ -1730,10 +1763,13 @@  walk_gimple_stmt (gimple_stmt_iterator *
    gimple stmt = gsi_stmt (*gsi);

    if (wi)
-    wi->gsi = *gsi;
+    {
+      wi->gsi = *gsi;
+      wi->removed_stmt = false;

-  if (wi && wi->want_locations && gimple_has_location (stmt))
-    input_location = gimple_location (stmt);
+      if (wi->want_locations && gimple_has_location (stmt))
+	input_location = gimple_location (stmt);
+    }

    ret = NULL;

@@ -1751,6 +1787,8 @@  walk_gimple_stmt (gimple_stmt_iterator *
        gcc_assert (tree_ret == NULL);

        /* Re-read stmt in case the callback changed it.  */
+      if (wi && wi->removed_stmt)
+	return NULL;
        stmt = gsi_stmt (*gsi);
      }

@@ -1786,6 +1824,17 @@  walk_gimple_stmt (gimple_stmt_iterator *
  	return wi->callback_result;
        break;

+    case GIMPLE_EH_ELSE:
+      ret = walk_gimple_seq (gimple_eh_else_n_body (stmt),
+			     callback_stmt, callback_op, wi);
+      if (ret)
+	return wi->callback_result;
+      ret = walk_gimple_seq (gimple_eh_else_e_body (stmt),
+			     callback_stmt, callback_op, wi);
+      if (ret)
+	return wi->callback_result;
+      break;
+
      case GIMPLE_TRY:
       ret = walk_gimple_seq (gimple_try_eval (stmt), callback_stmt, callback_op,
  	                     wi);
@@ -1813,8 +1862,8 @@  walk_gimple_stmt (gimple_stmt_iterator *
      case GIMPLE_OMP_TASK:
      case GIMPLE_OMP_SECTIONS:
      case GIMPLE_OMP_SINGLE:
-      ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt, callback_op,
-	                     wi);
+      ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt,
+			     callback_op, wi);
        if (ret)
  	return wi->callback_result;
        break;
@@ -1826,6 +1875,13 @@  walk_gimple_stmt (gimple_stmt_iterator *
  	return wi->callback_result;
        break;

+    case GIMPLE_TRANSACTION:
+      ret = walk_gimple_seq (gimple_transaction_body (stmt),
+			     callback_stmt, callback_op, wi);
+      if (ret)
+	return wi->callback_result;
+      break;
+
      default:
        gcc_assert (!gimple_has_substatements (stmt));
        break;
@@ -2252,6 +2308,13 @@  gimple_copy (gimple stmt)
  	  gimple_eh_filter_set_types (copy, t);
  	  break;

+	case GIMPLE_EH_ELSE:
+	  new_seq = gimple_seq_copy (gimple_eh_else_n_body (stmt));
+	  gimple_eh_else_set_n_body (copy, new_seq);
+	  new_seq = gimple_seq_copy (gimple_eh_else_e_body (stmt));
+	  gimple_eh_else_set_e_body (copy, new_seq);
+	  break;
+
  	case GIMPLE_TRY:
  	  new_seq = gimple_seq_copy (gimple_try_eval (stmt));
  	  gimple_try_set_eval (copy, new_seq);
@@ -2327,6 +2390,11 @@  gimple_copy (gimple stmt)
  	  gimple_omp_set_body (copy, new_seq);
  	  break;

+        case GIMPLE_TRANSACTION:
+	  new_seq = gimple_seq_copy (gimple_transaction_body (stmt));
+	  gimple_transaction_set_body (copy, new_seq);
+	  break;
+
  	case GIMPLE_WITH_CLEANUP_EXPR:
  	  new_seq = gimple_seq_copy (gimple_wce_cleanup (stmt));
  	  gimple_wce_set_cleanup (copy, new_seq);
@@ -2785,7 +2853,7 @@  is_gimple_address (const_tree t)
  /* Strip out all handled components that produce invariant
     offsets.  */

-static const_tree
+const_tree
  strip_invariant_refs (const_tree op)
  {
    while (handled_component_p (op))
@@ -3085,6 +3153,8 @@  get_call_expr_in (tree t)
      t = TREE_OPERAND (t, 1);
    if (TREE_CODE (t) == WITH_SIZE_EXPR)
      t = TREE_OPERAND (t, 0);
+  if (TREE_CODE (t) == VIEW_CONVERT_EXPR)
+    t = TREE_OPERAND (t, 0);
    if (TREE_CODE (t) == CALL_EXPR)
      return t;
    return NULL_TREE;
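
To illustrate the new removed_stmt protocol in the statement walker, a
hypothetical callback that deletes GIMPLE_NOPs; the removed_stmt field
itself is added to struct walk_stmt_info elsewhere in this patch.
Because the callback sets it, walk_gimple_stmt returns without
re-reading the dead iterator and walk_gimple_seq does not advance past
the statement that now occupies the slot:

  static tree
  drop_nops_cb (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		struct walk_stmt_info *wi)
  {
    gimple stmt = gsi_stmt (*gsi);

    *handled_ops_p = true;
    if (gimple_code (stmt) == GIMPLE_NOP)
      {
	/* Remove the statement and tell the walker about it.  */
	gsi_remove (gsi, true);
	wi->removed_stmt = true;
      }
    return NULL_TREE;
  }

It would be driven by walk_gimple_seq (seq, drop_nops_cb, NULL, &wi)
with a zeroed walk_stmt_info.
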
Index: gcc/gimple.h
===================================================================
--- gcc/gimple.h	(.../trunk)	(revision 180744)
+++ gcc/gimple.h	(.../branches/transactional-memory)	(revision 180773)
@@ -105,6 +105,7 @@  enum gf_mask {
      GF_CALL_NOTHROW		= 1 << 5,
      GF_CALL_ALLOCA_FOR_VAR	= 1 << 6,
      GF_CALL_INTERNAL		= 1 << 7,
+    GF_CALL_NOINLINE		= 1 << 8,
      GF_OMP_PARALLEL_COMBINED	= 1 << 0,

      /* True on an GIMPLE_OMP_RETURN statement if the return does not