diff mbox

Fix -fcompare-debug issues caused by recent VRP assert expr sorting changes (PR debug/81278)

Message ID 20170704124152.GM2123@tucnak
State New
Headers show

Commit Message

Jakub Jelinek July 4, 2017, 12:41 p.m. UTC
On Tue, Jul 04, 2017 at 02:00:13PM +0200, Richard Biener wrote:
> > That was intentional.  If a->e != NULL, then we know that b->e != NULL,
> > because we have
> >   else if (a->e != NULL && b->e == NULL)
> >     return -1;
> > earlier.  Similarly, if a->e == NULL, then we know that b->e == NULL, because
> > we have:
> >   if (a->e == NULL && b->e != NULL)
> >     return 1;
> > earlier.
> 
> Ah, ok.  Twisty ;)  I suppose jump threading will have eliminated
> the extra test.

In the first case maybe, I doubt it would do that after the
iterative_hash_expr calls which are likely not pure.

> > > Otherwise looks ok to me.  I wonder if we should merge the two
> > > sorting functions and change behavior with a global var or a
> > > template parameter instead (to reduce source duplication).  Does
> > > 
> > > vec.qsort (function_template<true>);
> > > 
> > > work?
> > 
> > Let me try that.

Seems to work, so like this if it passes bootstrap/regtest?

2017-07-04  Jakub Jelinek  <jakub@redhat.com>

	PR debug/81278
	* tree-vrp.c (compare_assert_loc): Turn into a function template
	with stable template parameter.  Only test if a->e is NULL,
	!a->e == !b->e has been verified already.  Use e == NULL or
	e != NULL instead of e or ! e tests.  If stable is true, don't use
	iterative_hash_expr, on the other side allow a or b or both NULL
	and sort the NULLs last.
	(process_assert_insertions): Sort using compare_assert_loc<false>
	instead of compare_assert_loc, later sort using
	compare_assert_loc<true> before calling process_assert_insertions_for
	in a loop.  Use break instead of continue once seen NULL pointer.



	Jakub

Comments

Richard Biener July 4, 2017, 2:06 p.m. UTC | #1
On July 4, 2017 2:41:52 PM GMT+02:00, Jakub Jelinek <jakub@redhat.com> wrote:
>On Tue, Jul 04, 2017 at 02:00:13PM +0200, Richard Biener wrote:
>> > That was intentional.  If a->e != NULL, then we know that b->e !=
>NULL,
>> > because we have
>> >   else if (a->e != NULL && b->e == NULL)
>> >     return -1;
>> > earlier.  Similarly, if a->e == NULL, then we know that b->e ==
>NULL, because
>> > we have:
>> >   if (a->e == NULL && b->e != NULL)
>> >     return 1;
>> > earlier.
>> 
>> Ah, ok.  Twisty ;)  I suppose jump threading will have eliminated
>> the extra test.
>
>In the first case maybe, I doubt it would do that after the
>iterative_hash_expr calls which are likely not pure.
>
>> > > Otherwise looks ok to me.  I wonder if we should merge the two
>> > > sorting functions and change behavior with a global var or a
>> > > template parameter instead (to reduce source duplication).  Does
>> > > 
>> > > vec.qsort (function_template<true>);
>> > > 
>> > > work?
>> > 
>> > Let me try that.
>
>Seems to work, so like this if it passes bootstrap/regtest?

OK.

Richard.

>2017-07-04  Jakub Jelinek  <jakub@redhat.com>
>
>	PR debug/81278
>	* tree-vrp.c (compare_assert_loc): Turn into a function template
>	with stable template parameter.  Only test if a->e is NULL,
>	!a->e == !b->e has been verified already.  Use e == NULL or
>	e != NULL instead of e or ! e tests.  If stable is true, don't use
>	iterative_hash_expr, on the other side allow a or b or both NULL
>	and sort the NULLs last.
>	(process_assert_insertions): Sort using compare_assert_loc<false>
>	instead of compare_assert_loc, later sort using
>	compare_assert_loc<true> before calling process_assert_insertions_for
>	in a loop.  Use break instead of continue once seen NULL pointer.
>
>--- gcc/tree-vrp.c.jj	2017-07-04 10:43:48.627706528 +0200
>+++ gcc/tree-vrp.c	2017-07-04 14:37:06.823101453 +0200
>@@ -6393,20 +6393,37 @@ process_assert_insertions_for (tree name
>   gcc_unreachable ();
> }
> 
>-/* Qsort helper for sorting assert locations.  */
>+/* Qsort helper for sorting assert locations.  If stable is true,
>don't
>+   use iterative_hash_expr because it can be unstable for
>-fcompare-debug,
>+   on the other side some pointers might be NULL.  */
> 
>+template <bool stable>
> static int
> compare_assert_loc (const void *pa, const void *pb)
> {
>   assert_locus * const a = *(assert_locus * const *)pa;
>   assert_locus * const b = *(assert_locus * const *)pb;
>-  if (! a->e && b->e)
>+
>+  /* If stable, some asserts might be optimized away already, sort
>+     them last.  */
>+  if (stable)
>+    {
>+      if (a == NULL)
>+	return b != NULL;
>+      else if (b == NULL)
>+	return -1;
>+    }
>+
>+  if (a->e == NULL && b->e != NULL)
>     return 1;
>-  else if (a->e && ! b->e)
>+  else if (a->e != NULL && b->e == NULL)
>     return -1;
> 
>+  /* After the above checks, we know that (a->e == NULL) == (b->e ==
>NULL),
>+     no need to test both a->e and b->e.  */
>+
>   /* Sort after destination index.  */
>-  if (! a->e && ! b->e)
>+  if (a->e == NULL)
>     ;
>   else if (a->e->dest->index > b->e->dest->index)
>     return 1;
>@@ -6419,11 +6436,27 @@ compare_assert_loc (const void *pa, cons
>   else if (a->comp_code < b->comp_code)
>     return -1;
> 
>+  hashval_t ha, hb;
>+
>+  /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
>+     uses DECL_UID of the VAR_DECL, so sorting might differ between
>+     -g and -g0.  When doing the removal of redundant assert exprs
>+     and commonization to successors, this does not matter, but for
>+     the final sort needs to be stable.  */
>+  if (stable)
>+    {
>+      ha = 0;
>+      hb = 0;
>+    }
>+  else
>+    {
>+      ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val,
>0));
>+      hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val,
>0));
>+    }
>+
>   /* Break the tie using hashing and source/bb index.  */
>-  hashval_t ha = iterative_hash_expr (a->expr, iterative_hash_expr
>(a->val, 0));
>-  hashval_t hb = iterative_hash_expr (b->expr, iterative_hash_expr
>(b->val, 0));
>   if (ha == hb)
>-    return (a->e && b->e
>+    return (a->e != NULL
> 	    ? a->e->src->index - b->e->src->index
> 	    : a->bb->index - b->bb->index);
>   return ha - hb;
>@@ -6452,7 +6485,7 @@ process_assert_insertions (void)
>       auto_vec<assert_locus *, 16> asserts;
>       for (; loc; loc = loc->next)
> 	asserts.safe_push (loc);
>-      asserts.qsort (compare_assert_loc);
>+      asserts.qsort (compare_assert_loc<false>);
> 
>/* Push down common asserts to successors and remove redundant ones. 
>*/
>       unsigned ecnt = 0;
>@@ -6506,11 +6539,14 @@ process_assert_insertions (void)
> 	    }
> 	}
> 
>+      /* The asserts vector sorting above might be unstable for
>+	 -fcompare-debug, sort again to ensure a stable sort.  */
>+      asserts.qsort (compare_assert_loc<true>);
>       for (unsigned j = 0; j < asserts.length (); ++j)
> 	{
> 	  loc = asserts[j];
> 	  if (! loc)
>-	    continue;
>+	    break;
>	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
> 	  num_asserts++;
> 	  free (loc);
>
>
>	Jakub
Jeff Law July 4, 2017, 3:07 p.m. UTC | #2
On 07/04/2017 06:41 AM, Jakub Jelinek wrote:
> On Tue, Jul 04, 2017 at 02:00:13PM +0200, Richard Biener wrote:
>>> That was intentional.  If a->e != NULL, then we know that b->e != NULL,
>>> because we have
>>>   else if (a->e != NULL && b->e == NULL)
>>>     return -1;
>>> earlier.  Similarly, if a->e == NULL, then we know that b->e == NULL, because
>>> we have:
>>>   if (a->e == NULL && b->e != NULL)
>>>     return 1;
>>> earlier.
>>
>> Ah, ok.  Twisty ;)  I suppose jump threading will have eliminated
>> the extra test.
> 
> In the first case maybe, I doubt it would do that after the
> iterative_hash_expr calls which are likely not pure.
Correct.  It'll have to assume that the memory objects changed.

Jeff
diff mbox

Patch

--- gcc/tree-vrp.c.jj	2017-07-04 10:43:48.627706528 +0200
+++ gcc/tree-vrp.c	2017-07-04 14:37:06.823101453 +0200
@@ -6393,20 +6393,37 @@  process_assert_insertions_for (tree name
   gcc_unreachable ();
 }
 
-/* Qsort helper for sorting assert locations.  */
+/* Qsort helper for sorting assert locations.  If stable is true, don't
+   use iterative_hash_expr because it can be unstable for -fcompare-debug,
+   on the other side some pointers might be NULL.  */
 
+template <bool stable>
 static int
 compare_assert_loc (const void *pa, const void *pb)
 {
   assert_locus * const a = *(assert_locus * const *)pa;
   assert_locus * const b = *(assert_locus * const *)pb;
-  if (! a->e && b->e)
+
+  /* If stable, some asserts might be optimized away already, sort
+     them last.  */
+  if (stable)
+    {
+      if (a == NULL)
+	return b != NULL;
+      else if (b == NULL)
+	return -1;
+    }
+
+  if (a->e == NULL && b->e != NULL)
     return 1;
-  else if (a->e && ! b->e)
+  else if (a->e != NULL && b->e == NULL)
     return -1;
 
+  /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
+     no need to test both a->e and b->e.  */
+
   /* Sort after destination index.  */
-  if (! a->e && ! b->e)
+  if (a->e == NULL)
     ;
   else if (a->e->dest->index > b->e->dest->index)
     return 1;
@@ -6419,11 +6436,27 @@  compare_assert_loc (const void *pa, cons
   else if (a->comp_code < b->comp_code)
     return -1;
 
+  hashval_t ha, hb;
+
+  /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
+     uses DECL_UID of the VAR_DECL, so sorting might differ between
+     -g and -g0.  When doing the removal of redundant assert exprs
+     and commonization to successors, this does not matter, but for
+     the final sort needs to be stable.  */
+  if (stable)
+    {
+      ha = 0;
+      hb = 0;
+    }
+  else
+    {
+      ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
+      hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
+    }
+
   /* Break the tie using hashing and source/bb index.  */
-  hashval_t ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
-  hashval_t hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
   if (ha == hb)
-    return (a->e && b->e
+    return (a->e != NULL
 	    ? a->e->src->index - b->e->src->index
 	    : a->bb->index - b->bb->index);
   return ha - hb;
@@ -6452,7 +6485,7 @@  process_assert_insertions (void)
       auto_vec<assert_locus *, 16> asserts;
       for (; loc; loc = loc->next)
 	asserts.safe_push (loc);
-      asserts.qsort (compare_assert_loc);
+      asserts.qsort (compare_assert_loc<false>);
 
       /* Push down common asserts to successors and remove redundant ones.  */
       unsigned ecnt = 0;
@@ -6506,11 +6539,14 @@  process_assert_insertions (void)
 	    }
 	}
 
+      /* The asserts vector sorting above might be unstable for
+	 -fcompare-debug, sort again to ensure a stable sort.  */
+      asserts.qsort (compare_assert_loc<true>);
       for (unsigned j = 0; j < asserts.length (); ++j)
 	{
 	  loc = asserts[j];
 	  if (! loc)
-	    continue;
+	    break;
 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
 	  num_asserts++;
 	  free (loc);