@@ -1,3 +1,7 @@
+2016-04-08  Thomas Schwinge  <thomas@codesourcery.com>
+
+	* gimple.def: Remove GIMPLE_OMP_ENTRY_END.  Remove all uses.
+
2016-03-11 Cesar Philippidis <cesar@codesourcery.com>
* config/nvptx/nvptx.c (nvptx_gen_shuffle): Add support for QImode
@@ -313,7 +313,6 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
case GIMPLE_OMP_ATOMIC_LOAD:
case GIMPLE_OMP_ATOMIC_STORE:
case GIMPLE_OMP_CONTINUE:
- case GIMPLE_OMP_ENTRY_END:
break;
case GIMPLE_CALL:
@@ -2304,10 +2304,6 @@ pp_gimple_stmt_1 (pretty_printer *buffer, gimple *gs, int spc, int flags)
pp_string (buffer, "GIMPLE_SECTIONS_SWITCH");
break;
- case GIMPLE_OMP_ENTRY_END:
- pp_string (buffer, "GIMPLE_OMP_ENTRY_END");
- break;
-
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_SECTION:
@@ -828,15 +828,6 @@ gimple_build_debug_source_bind_stat (tree var, tree value,
}
-/* Build a GIMPLE_OMP_ENTRY_END statement. */
-
-gimple *
-gimple_build_omp_entry_end (void)
-{
- return gimple_alloc (GIMPLE_OMP_ENTRY_END, 0);
-}
-
-
/* Build a GIMPLE_OMP_CRITICAL statement.
BODY is the sequence of statements for which only one thread can execute.
@@ -227,10 +227,6 @@ DEFGSCODE(GIMPLE_OMP_ATOMIC_STORE, "gimple_omp_atomic_store",
iteration in partially lowered OpenMP code. */
DEFGSCODE(GIMPLE_OMP_CONTINUE, "gimple_omp_continue", GSS_OMP_CONTINUE)
-/* GIMPLE_OMP_ENTRY_END marks the end of the unpredicated entry block
- into an offloaded region. */
-DEFGSCODE(GIMPLE_OMP_ENTRY_END, "gimple_omp_entry_end", GSS_BASE)
-
/* GIMPLE_OMP_CRITICAL <NAME, BODY> represents
#pragma omp critical [name]
@@ -1451,7 +1451,6 @@ gdebug *gimple_build_debug_bind_stat (tree, tree, gimple * MEM_STAT_DECL);
gdebug *gimple_build_debug_source_bind_stat (tree, tree, gimple * MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
-gimple *gimple_build_omp_entry_end (void);
gomp_critical *gimple_build_omp_critical (gimple_seq, tree, tree);
gomp_for *gimple_build_omp_for (gimple_seq, int, tree, size_t, gimple_seq);
gomp_parallel *gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
@@ -6061,7 +6060,6 @@ gimple_return_set_retbnd (gimple *gs, tree retval)
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
- case GIMPLE_OMP_ENTRY_END: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE: \
@@ -13159,14 +13159,6 @@ expand_omp_target (struct omp_region *region)
if (gimple_omp_target_kind (entry_stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
mark_loops_in_oacc_kernels_region (region->entry, region->exit);
- basic_block entry_succ_bb = single_succ (entry_bb);
- if (offloaded)
- {
- gsi = gsi_last_bb (entry_succ_bb);
- if (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ENTRY_END)
- gsi_remove (&gsi, true);
- }
-
if (offloaded)
{
unsigned srcidx, dstidx, num;
@@ -13187,6 +13179,7 @@ expand_omp_target (struct omp_region *region)
tree data_arg = gimple_omp_target_data_arg (entry_stmt);
if (data_arg)
{
+ basic_block entry_succ_bb = single_succ (entry_bb);
gimple_stmt_iterator gsi;
tree arg;
gimple *tgtcopy_stmt = NULL;
@@ -14063,8 +14056,6 @@ build_omp_regions_1 (basic_block bb, struct omp_region *parent,
gcc_assert (parent);
parent->cont = bb;
}
- else if (code == GIMPLE_OMP_ENTRY_END)
- gcc_assert (parent);
else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
{
/* GIMPLE_OMP_SECTIONS_SWITCH is part of
@@ -16978,9 +16969,6 @@ lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
false, NULL, NULL, &fork_seq, &join_seq, ctx);
}
- if (offloaded)
- gimple_seq_add_stmt (&new_body, gimple_build_omp_entry_end ());
-
gimple_seq_add_seq (&new_body, fork_seq);
gimple_seq_add_seq (&new_body, tgt_body);
gimple_seq_add_seq (&new_body, join_seq);
@@ -18501,7 +18489,6 @@ make_gimple_omp_edges (basic_block bb, struct omp_region **region,
fallthru = false;
break;
- case GIMPLE_OMP_ENTRY_END:
case GIMPLE_OMP_ATOMIC_LOAD:
case GIMPLE_OMP_ATOMIC_STORE:
fallthru = true;