
haifa-sched: Fix problems with cc0 in prune_ready_list

Message ID 4F859044.3070907@codesourcery.com
State New

Commit Message

Bernd Schmidt April 11, 2012, 2:08 p.m. UTC
On 02/10/2012 08:01 PM, Vladimir Makarov wrote:
> On 02/08/2012 10:01 AM, Bernd Schmidt wrote:
>> We found a scheduler problem while testing Coldfire targets. The
>> prune_ready_list function I introduced doesn't take SCHED_GROUPs into
>> account, which can lead to a random insn being scheduled between a cc0
>> setter and user. The patch below fixes it, OK? (Bootstrapped & tested
>> i686-linux).
>>
>>
> Ok. Thanks, Bernd.

It turns out that the previous fix was insufficient. If a cc0 user and
another insn are on the ready list, it can happen that the cc0 user is
delayed for longer than the other insn, and we will reenter
prune_ready_list with a new ready insn but with the cc0 user still
queued for later. In that case we'll do the wrong thing.

The following patch reworks the function to take care of this problem.
Bootstrapped and tested on i686-linux, and also tested with a m68k
multilib on our internal compiler (4.6 based but with scheduler patches
from 4.7). Ok?

I assume this should also go into 4.7 eventually.


Bernd
	* haifa-sched.c (prune_ready_list): Rework handling of SCHED_GROUP_P
	insns so that no other insn is queued for a time before them.
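
For readers who want to follow the shape of the change without walking through
the diff, below is a minimal, self-contained toy model of the two-pass idea.
It is not GCC code: the insns, costs and queue are invented stand-ins, and the
ready-list bookkeeping that the real prune_ready_list has to redo after each
ready_remove is left out. It only shows how SCHED_GROUP_P insns are handled
first and how every other insn is then delayed by at least the group's cost.

/* Toy model only -- not GCC code.  Insns are plain structs, the DFA and
   delay-pair machinery is reduced to a fixed per-insn cost, and "queueing"
   just records the chosen delay instead of moving the insn off the list.  */

#include <stdio.h>
#include <stdbool.h>

struct toy_insn
{
  const char *name;
  bool sched_group_p;   /* Must immediately follow its predecessor
                           (e.g. a cc0 user after its setter).  */
  int cost;             /* Stand-in for the DFA/delay cost in cycles.  */
  int queued_for;       /* Result: -1 = stays ready, otherwise delay.  */
};

static void
toy_prune_ready_list (struct toy_insn *ready, int n_ready)
{
  bool sched_group_found = false;
  int min_cost_group = 1;
  int i, pass;

  /* Is any SCHED_GROUP_P insn on the ready list at all?  */
  for (i = 0; i < n_ready; i++)
    if (ready[i].sched_group_p)
      {
        sched_group_found = true;
        break;
      }

  /* Pass 0 handles only group insns and remembers the largest delay any
     of them needs; pass 1 handles the rest and delays each non-group insn
     by at least that much, so nothing can slip in between the members of
     the group.  */
  for (pass = sched_group_found ? 0 : 1; pass < 2; pass++)
    for (i = 0; i < n_ready; i++)
      {
        struct toy_insn *insn = &ready[i];
        int cost;

        if (pass == 0 && !insn->sched_group_p)
          continue;
        if (pass == 1 && insn->sched_group_p)
          continue;             /* Already handled in pass 0.  */

        if (sched_group_found && !insn->sched_group_p)
          cost = min_cost_group;        /* "not in sched group"  */
        else
          cost = insn->cost;

        if (cost >= 1)
          {
            if (insn->sched_group_p && cost > min_cost_group)
              min_cost_group = cost;
            insn->queued_for = cost;    /* queue_insn (insn, cost, ...)  */
          }
        else
          insn->queued_for = -1;        /* Keep it on the ready list.  */
      }
}

int
main (void)
{
  /* A cc0 user that still needs two cycles, plus an unrelated insn that
     is ready now.  The unrelated insn must be delayed just as long, or
     it would end up between the cc0 setter and its user.  */
  struct toy_insn ready[] = {
    { "cc0 user (sched group)", true, 2, 0 },
    { "unrelated insn", false, 0, 0 },
  };
  int n = sizeof ready / sizeof ready[0];
  int i;

  toy_prune_ready_list (ready, n);

  for (i = 0; i < n; i++)
    {
      if (ready[i].queued_for < 0)
        printf ("%-24s -> stays on the ready list\n", ready[i].name);
      else
        printf ("%-24s -> queued for %d cycle(s)\n", ready[i].name,
                ready[i].queued_for);
    }
  return 0;
}

In this toy run both insns end up queued for two cycles; with the old
single-pass logic the unrelated insn would only have been delayed one cycle,
become ready again before the cc0 user, and been scheduled in between.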

Comments

Vladimir Makarov April 11, 2012, 2:37 p.m. UTC | #1
On 04/11/2012 10:08 AM, Bernd Schmidt wrote:
> On 02/10/2012 08:01 PM, Vladimir Makarov wrote:
>> On 02/08/2012 10:01 AM, Bernd Schmidt wrote:
>>> We found a scheduler problem while testing Coldfire targets. The
>>> prune_ready_list function I introduced doesn't take SCHED_GROUPs into
>>> account, which can lead to a random insn being scheduled between a cc0
>>> setter and user. The patch below fixes it, OK? (Bootstrapped & tested
>>> i686-linux).
>>>
>>>
>> Ok. Thanks, Bernd.
> It turns out that the previous fix was insufficient. If a cc0 user and
> another insn are on the ready list, it can happen that the cc0 user is
> delayed for longer than the other insn, and we will reenter
> prune_ready_list with a new ready insn but with the cc0 user still
> queued for later. In that case we'll do the wrong thing.
What a coincidence :)  Yesterday I found the same bug on m68k on the lra
branch and had just started to work on it.
> The following patch reworks the function to take care of this problem.
> Bootstrapped and tested on i686-linux, and also tested with a m68k
> multilib on our internal compiler (4.6 based but with scheduler patches
> from 4.7). Ok?
Ok.
> I assume this should also go into 4.7 eventually.
>
Yes.  It is a serious bug for cc0 targets.

Thanks, Bernd.

Patch

Index: gcc/haifa-sched.c
===================================================================
--- gcc/haifa-sched.c	(revision 186270)
+++ gcc/haifa-sched.c	(working copy)
@@ -3946,88 +3946,106 @@  static void
 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
 		  bool shadows_only_p, bool modulo_epilogue_p)
 {
-  int i;
+  int i, pass;
   bool sched_group_found = false;
+  int min_cost_group = 1;
 
- restart:
   for (i = 0; i < ready.n_ready; i++)
     {
       rtx insn = ready_element (&ready, i);
-      int cost = 0;
-      const char *reason = "resource conflict";
-
-      if (DEBUG_INSN_P (insn))
-	continue;
-
-      if (SCHED_GROUP_P (insn) && !sched_group_found)
+      if (SCHED_GROUP_P (insn))
 	{
 	  sched_group_found = true;
-	  if (i > 0)
-	    goto restart;
+	  break;
 	}
+    }
 
-      if (sched_group_found && !SCHED_GROUP_P (insn))
-	{
-	  cost = 1;
-	  reason = "not in sched group";
-	}
-      else if (modulo_epilogue_p && INSN_EXACT_TICK (insn) == INVALID_TICK)
-	{
-	  cost = max_insn_queue_index;
-	  reason = "not an epilogue insn";
-	}
-      else if (shadows_only_p && !SHADOW_P (insn))
-	{
-	  cost = 1;
-	  reason = "not a shadow";
-	}
-      else if (recog_memoized (insn) < 0)
-	{
-	  if (!first_cycle_insn_p
-	      && (GET_CODE (PATTERN (insn)) == ASM_INPUT
-		  || asm_noperands (PATTERN (insn)) >= 0))
-	    cost = 1;
-	  reason = "asm";
-	}
-      else if (sched_pressure_p)
-	cost = 0;
-      else
+  /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
+     such an insn first and note its cost, then schedule all other insns
+     for one cycle later.  */
+  for (pass = sched_group_found ? 0 : 1; pass < 2; )
+    {
+      int n = ready.n_ready;
+      for (i = 0; i < n; i++)
 	{
-	  int delay_cost = 0;
+	  rtx insn = ready_element (&ready, i);
+	  int cost = 0;
+	  const char *reason = "resource conflict";
 
-	  if (delay_htab)
+	  if (DEBUG_INSN_P (insn))
+	    continue;
+
+	  if (sched_group_found && !SCHED_GROUP_P (insn))
 	    {
-	      struct delay_pair *delay_entry;
-	      delay_entry
-		= (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
-							    htab_hash_pointer (insn));
-	      while (delay_entry && delay_cost == 0)
+	      if (pass == 0)
+		continue;
+	      cost = min_cost_group;
+	      reason = "not in sched group";
+	    }
+	  else if (modulo_epilogue_p
+		   && INSN_EXACT_TICK (insn) == INVALID_TICK)
+	    {
+	      cost = max_insn_queue_index;
+	      reason = "not an epilogue insn";
+	    }
+	  else if (shadows_only_p && !SHADOW_P (insn))
+	    {
+	      cost = 1;
+	      reason = "not a shadow";
+	    }
+	  else if (recog_memoized (insn) < 0)
+	    {
+	      if (!first_cycle_insn_p
+		  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
+		      || asm_noperands (PATTERN (insn)) >= 0))
+		cost = 1;
+	      reason = "asm";
+	    }
+	  else if (sched_pressure_p)
+	    cost = 0;
+	  else
+	    {
+	      int delay_cost = 0;
+
+	      if (delay_htab)
 		{
-		  delay_cost = estimate_shadow_tick (delay_entry);
-		  if (delay_cost > max_insn_queue_index)
-		    delay_cost = max_insn_queue_index;
-		  delay_entry = delay_entry->next_same_i1;
+		  struct delay_pair *delay_entry;
+		  delay_entry
+		    = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
+								htab_hash_pointer (insn));
+		  while (delay_entry && delay_cost == 0)
+		    {
+		      delay_cost = estimate_shadow_tick (delay_entry);
+		      if (delay_cost > max_insn_queue_index)
+			delay_cost = max_insn_queue_index;
+		      delay_entry = delay_entry->next_same_i1;
+		    }
 		}
-	    }
 
-	  memcpy (temp_state, curr_state, dfa_state_size);
-	  cost = state_transition (temp_state, insn);
-	  if (cost < 0)
-	    cost = 0;
-	  else if (cost == 0)
-	    cost = 1;
-	  if (cost < delay_cost)
+	      memcpy (temp_state, curr_state, dfa_state_size);
+	      cost = state_transition (temp_state, insn);
+	      if (cost < 0)
+		cost = 0;
+	      else if (cost == 0)
+		cost = 1;
+	      if (cost < delay_cost)
+		{
+		  cost = delay_cost;
+		  reason = "shadow tick";
+		}
+	    }
+	  if (cost >= 1)
 	    {
-	      cost = delay_cost;
-	      reason = "shadow tick";
+	      if (SCHED_GROUP_P (insn) && cost > min_cost_group)
+		min_cost_group = cost;
+	      ready_remove (&ready, i);
+	      queue_insn (insn, cost, reason);
+	      if (i + 1 < n)
+		break;
 	    }
 	}
-      if (cost >= 1)
-	{
-	  ready_remove (&ready, i);
-	  queue_insn (insn, cost, reason);
-	  goto restart;
-	}
+      if (i == n)
+	pass++;
     }
 }