Patchwork Backported 6 patches to 4.6 branch

login
register
mail settings
Submitter Jakub Jelinek
Date Feb. 14, 2012, 11:44 p.m.
Message ID <20120214234412.GZ18768@tyan-ft48-01.lab.bos.redhat.com>
Download mbox | patch
Permalink /patch/141215/
State New
Headers show

Comments

Jakub Jelinek - Feb. 14, 2012, 11:44 p.m.
Hi!

Further backported patches, bootstrapped/regtested on x86_64-linux and
i686-linux, committed to 4.6 branch.  It has been a while since 4.6.2
was released; are there any blockers that should be resolved before
4.6.3-rc1?

	Jakub
2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	PR bootstrap/51969
	Backported from mainline
	2011-11-08  Michael Matz  <matz@suse.de>

	* gengtype.c (write_field_root): Avoid out-of-scope access of newv.
2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2012-02-13  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/52230
	* omp-low.c (expand_omp_for): If a static schedule without
	chunk size has NULL region->cont, force fd.chunk_size to be
	integer_zero_node.

--- gcc/omp-low.c	(revision 184164)
+++ gcc/omp-low.c	(revision 184165)
@@ -4664,6 +4664,9 @@ expand_omp_for (struct omp_region *regio
     {
       int fn_index, start_ix, next_ix;
 
+      if (fd.chunk_size == NULL
+	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
+	fd.chunk_size = integer_zero_node;
       gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
       fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
 		  ? 3 : fd.sched_kind;
2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2012-02-13  Jakub Jelinek  <jakub@redhat.com>

	* cselib.c (dump_cselib_val): Don't assume l->setting_insn is
	non-NULL.

--- gcc/cselib.c	(revision 184167)
+++ gcc/cselib.c	(revision 184168)
@@ -2688,8 +2688,11 @@ dump_cselib_val (void **x, void *info)
       fputs (" locs:", out);
       do
 	{
-	  fprintf (out, "\n  from insn %i ",
-		   INSN_UID (l->setting_insn));
+	  if (l->setting_insn)
+	    fprintf (out, "\n  from insn %i ",
+		     INSN_UID (l->setting_insn));
+	  else
+	    fprintf (out, "\n   ");
 	  print_inline_rtx (out, l->loc, 4);
 	}
       while ((l = l->next));
2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	PR c/52181
	* c-decl.c (merge_decls): Copy DECL_USER_ALIGN bit from olddecl to
	newdecl.

	* decl.c (duplicate_decls): If olddecl has bigger DECL_ALIGN than
	newdecl, copy DECL_ALIGN to newdecl and or DECL_USER_ALIGN bits.

	* c-c++-common/pr52181.c: New test.

--- gcc/c-decl.c	(revision 184192)
+++ gcc/c-decl.c	(revision 184193)
@@ -2449,6 +2449,7 @@ merge_decls (tree newdecl, tree olddecl,
     memcpy ((char *) olddecl + sizeof (struct tree_common),
 	    (char *) newdecl + sizeof (struct tree_common),
 	    sizeof (struct tree_decl_common) - sizeof (struct tree_common));
+    DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
     switch (TREE_CODE (olddecl))
       {
       case FUNCTION_DECL:
--- gcc/cp/decl.c	(revision 184192)
+++ gcc/cp/decl.c	(revision 184193)
@@ -2214,7 +2214,12 @@ duplicate_decls (tree newdecl, tree oldd
       SET_DECL_INIT_PRIORITY (olddecl, DECL_INIT_PRIORITY (newdecl));
       DECL_HAS_INIT_PRIORITY_P (olddecl) = 1;
     }
-  /* Likewise for DECL_USER_ALIGN and DECL_PACKED.  */
+  /* Likewise for DECL_ALIGN, DECL_USER_ALIGN and DECL_PACKED.  */
+  if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
+    {
+      DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl);
+      DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
+    }
   DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
   if (TREE_CODE (newdecl) == FIELD_DECL)
     DECL_PACKED (olddecl) = DECL_PACKED (newdecl);
--- gcc/testsuite/c-c++-common/pr52181.c	(revision 0)
+++ gcc/testsuite/c-c++-common/pr52181.c	(revision 184193)
@@ -0,0 +1,13 @@
+/* PR c/52181 */
+/* { dg-do compile } */
+
+extern const int v1[];
+const int __attribute__((aligned(16))) v1[] = { 1 };
+extern const int __attribute__((aligned(16))) v2[];
+const int v2[] = { 1 };
+extern const int __attribute__((aligned(16))) v3[];
+const int __attribute__((aligned(16))) v3[] = { 1 };
+const int __attribute__((aligned(16))) v4[] = { 1 };
+int test[(__alignof__ (v4) != __alignof__ (v1)		/* { dg-bogus "is negative" } */
+	 || __alignof__ (v4) != __alignof__ (v2)
+	 || __alignof__ (v4) != __alignof__ (v3)) ? -1 : 0];
2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	PR debug/51950
	* dwarf2out.c (clone_tree_hash): New function.
	(copy_decls_walk): Use it instead of clone_tree.

--- gcc/dwarf2out.c	(revision 184223)
+++ gcc/dwarf2out.c	(revision 184224)
@@ -7407,6 +7407,32 @@ copy_ancestor_tree (dw_die_ref unit, dw_
   return copy;
 }
 
+/* Like clone_tree, but additionally enter all the children into
+   the hash table decl_table.  */
+
+static dw_die_ref
+clone_tree_hash (dw_die_ref die, htab_t decl_table)
+{
+  dw_die_ref c;
+  dw_die_ref clone = clone_die (die);
+  struct decl_table_entry *entry;
+  void **slot = htab_find_slot_with_hash (decl_table, die,
+					  htab_hash_pointer (die), INSERT);
+  /* Assert that DIE isn't in the hash table yet.  If it would be there
+     before, the ancestors would be necessarily there as well, therefore
+     clone_tree_hash wouldn't be called.  */
+  gcc_assert (*slot == HTAB_EMPTY_ENTRY);
+  entry = XCNEW (struct decl_table_entry);
+  entry->orig = die;
+  entry->copy = clone;
+  *slot = entry;
+
+  FOR_EACH_CHILD (die, c,
+		  add_child_die (clone, clone_tree_hash (c, decl_table)));
+
+  return clone;
+}
+
 /* Walk the DIE and its children, looking for references to incomplete
    or trivial types that are unmarked (i.e., that are not in the current
    type_unit).  */
@@ -7443,7 +7469,11 @@ copy_decls_walk (dw_die_ref unit, dw_die
           else
             {
               dw_die_ref parent = unit;
-              dw_die_ref copy = clone_tree (targ);
+	      dw_die_ref copy = clone_die (targ);
+
+	      FOR_EACH_CHILD (targ, c,
+			      add_child_die (copy,
+					     clone_tree_hash (c, decl_table)));
 
               /* Make sure the cloned tree is marked as part of the
                  type unit.  */
2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2012-02-14  Jakub Jelinek  <jakub@redhat.com>

	PR c++/52247
	* pt.c (tsubst_copy_asm_operands): For LABEL_DECL values call
	lookup_label on label's name and set TREE_USED.

	* g++.dg/template/asmgoto1.C: New test.

--- gcc/cp/pt.c	(revision 184228)
+++ gcc/cp/pt.c	(revision 184229)
@@ -12612,8 +12612,17 @@ tsubst_copy_asm_operands (tree t, tree a
   if (purpose)
     purpose = RECUR (purpose);
   value = TREE_VALUE (t);
-  if (value && TREE_CODE (value) != LABEL_DECL)
-    value = RECUR (value);
+  if (value)
+    {
+      if (TREE_CODE (value) != LABEL_DECL)
+	value = RECUR (value);
+      else
+	{
+	  value = lookup_label (DECL_NAME (value));
+	  gcc_assert (TREE_CODE (value) == LABEL_DECL);
+	  TREE_USED (value) = 1;
+	}
+    }
   chain = TREE_CHAIN (t);
   if (chain && chain != void_type_node)
     chain = RECUR (chain);
--- gcc/testsuite/g++.dg/template/asmgoto1.C	(revision 0)
+++ gcc/testsuite/g++.dg/template/asmgoto1.C	(revision 184229)
@@ -0,0 +1,18 @@
+// PR c++/52247
+// { dg-do compile }
+
+template <int N>
+bool
+bar ()
+{
+  __asm goto ("" : : : : lab);
+  return true;
+lab:
+  return false;
+}
+
+bool
+foo ()
+{
+  return bar<0> ();
+}
Eric Botcazou - Feb. 15, 2012, 12:18 a.m.
> Further backported patches, bootstrapped/regtested on x86_64-linux and
> i686-linux, committed to 4.6 branch.  It has been a while since 4.6.2
> has been released, are there any blockers that should be resolved before
> 4.6.3-rc1?

PR target/51921 (the pending patch is under PR target/52205).

Last time I checked, there were a couple of C++ failures on the branch.

Patch

--- gcc/gengtype.c	(revision 181171)
+++ gcc/gengtype.c	(revision 181172)
@@ -3651,14 +3651,13 @@  write_field_root (outf_p f, pair_p v, ty
 		  int has_length, struct fileloc *line, const char *if_marked,
 		  bool emit_pch, type_p field_type, const char *field_name)
 {
+  struct pair newv;
   /* If the field reference is relative to V, rather than to some
      subcomponent of V, we can mark any subarrays with a single stride.
      We're effectively treating the field as a global variable in its
      own right.  */
   if (v && type == v->type)
     {
-      struct pair newv;
-
       newv = *v;
       newv.type = field_type;
       newv.name = ACONCAT ((v->name, ".", field_name, NULL));