
[x86] Fix combine for conditional instructions.

Message ID CAFULd4ZS5zZOsTmBFJW30A9ev=2cZwLY-6-ammmrFcu7q8zq+g@mail.gmail.com
State New

Commit Message

Uros Bizjak Dec. 19, 2012, 4:08 p.m. UTC
On Fri, Dec 14, 2012 at 11:47 AM, Yuri Rumyantsev <ysrumyan@gmail.com> wrote:

> With your new fix that adds if-then-else splitting for memory
> operands, I got the expected performance speed-up: +6.7% for Atom
> and +8.4% for SNB. We will complete all testing this weekend, and I
> will send you our final feedback on Monday.

After some off-line discussions, we decided to enable splitting for
Atom only (where it was always a win), since splitting regressed SNB
in some other tests.
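
For illustration (this example is mine, not part of the patch), the
shape of code affected is a conditional move where one arm comes from
memory.  A minimal sketch, assuming a hypothetical global "fallback":

/* At -O2 on x86, GCC may if-convert the ternary below into a cmov.
   Without the new splitter/peephole2s, the load from "fallback" can
   be folded into the conditional move itself as a memory operand,
   e.g. "cmovs fallback, %eax", which is what this tuning avoids on
   Atom.  */
static int fallback;

int
pick (int a)
{
  return a < 0 ? fallback : a;
}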

2012-12-19  Uros Bizjak  <ubizjak@gmail.com>
	    Yuri Rumyantsev  <ysrumyan@gmail.com>

	* config/i386/i386.h (enum ix86_tune_indices): Add
	X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE.
	(TARGET_AVOID_MEM_OPND_FOR_CMOVE): New define.
	* config/i386/i386.c (initial_ix86_tune_features)
	<X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE>: Initialize.
	* config/i386/i386.md (splitters to avoid cmove memory operands): New.
	(peephole2s to avoid cmove memory operands): New.

Tested on x86_64-pc-linux-gnu, committed to mainline SVN.

Uros.
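
As a sketch of the intended effect of the patterns in the patch below
(the register names here are illustrative; the actual scratch is
whatever match_scratch provides): the pre-reload splitter forces
memory inputs into fresh pseudos on 32-bit targets, while the
post-reload peephole2s rewrite a remaining memory-input cmov into a
plain load followed by a register-register cmov, roughly

	cmovge	fallback, %eax

becoming

	movl	fallback, %ecx
	cmovge	%ecx, %eax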

Patch

Index: i386.c
===================================================================
--- i386.c	(revision 194610)
+++ i386.c	(working copy)
@@ -2026,7 +2026,11 @@  static unsigned int initial_ix86_tune_features[X86
 
   /* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
      regs instead of memory.  */
-  m_COREI7 | m_CORE2I7
+  m_COREI7 | m_CORE2I7,
+
+  /* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
+     a conditional move.  */
+  m_ATOM
 };
 
 /* Feature tests against the various architecture variations.  */
Index: i386.h
===================================================================
--- i386.h	(revision 194610)
+++ i386.h	(working copy)
@@ -331,6 +331,7 @@  enum ix86_tune_indices {
   X86_TUNE_REASSOC_INT_TO_PARALLEL,
   X86_TUNE_REASSOC_FP_TO_PARALLEL,
   X86_TUNE_GENERAL_REGS_SSE_SPILL,
+  X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE,
 
   X86_TUNE_LAST
 };
@@ -436,6 +437,8 @@  extern unsigned char ix86_tune_features[X86_TUNE_L
 	ix86_tune_features[X86_TUNE_REASSOC_FP_TO_PARALLEL]
 #define TARGET_GENERAL_REGS_SSE_SPILL \
 	ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL]
+#define TARGET_AVOID_MEM_OPND_FOR_CMOVE \
+	ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE]
 
 /* Feature tests against the various architecture variations.  */
 enum ix86_arch_indices {
Index: i386.md
===================================================================
--- i386.md	(revision 194610)
+++ i386.md	(working copy)
@@ -16093,6 +16093,28 @@ 
   [(set_attr "type" "icmov")
    (set_attr "mode" "<MODE>")])
 
+;; Don't do conditional moves with memory inputs.  This splitter helps
+;; register starved x86_32 by forcing inputs into registers before reload.
+(define_split
+  [(set (match_operand:SWI248 0 "register_operand")
+	(if_then_else:SWI248 (match_operator 1 "ix86_comparison_operator"
+			       [(reg FLAGS_REG) (const_int 0)])
+	  (match_operand:SWI248 2 "nonimmediate_operand")
+	  (match_operand:SWI248 3 "nonimmediate_operand")))]
+  "!TARGET_64BIT && TARGET_CMOVE
+   && TARGET_AVOID_MEM_OPND_FOR_CMOVE
+   && (MEM_P (operands[2]) || MEM_P (operands[3]))
+   && can_create_pseudo_p ()
+   && optimize_insn_for_speed_p ()"
+  [(set (match_dup 0)
+	(if_then_else:SWI248 (match_dup 1) (match_dup 2) (match_dup 3)))]
+{
+  if (MEM_P (operands[2]))
+    operands[2] = force_reg (<MODE>mode, operands[2]);
+  if (MEM_P (operands[3]))
+    operands[3] = force_reg (<MODE>mode, operands[3]);
+})
+
 (define_insn "*movqicc_noc"
   [(set (match_operand:QI 0 "register_operand" "=r,r")
 	(if_then_else:QI (match_operator 1 "ix86_comparison_operator"
@@ -16105,14 +16127,12 @@ 
    (set_attr "mode" "QI")])
 
 (define_split
-  [(set (match_operand 0 "register_operand")
-	(if_then_else (match_operator 1 "ix86_comparison_operator"
-			[(reg FLAGS_REG) (const_int 0)])
-		      (match_operand 2 "register_operand")
-		      (match_operand 3 "register_operand")))]
+  [(set (match_operand:SWI12 0 "register_operand")
+	(if_then_else:SWI12 (match_operator 1 "ix86_comparison_operator"
+			      [(reg FLAGS_REG) (const_int 0)])
+		      (match_operand:SWI12 2 "register_operand")
+		      (match_operand:SWI12 3 "register_operand")))]
   "TARGET_CMOVE && !TARGET_PARTIAL_REG_STALL
-   && (GET_MODE (operands[0]) == QImode
-       || GET_MODE (operands[0]) == HImode)
    && reload_completed"
   [(set (match_dup 0)
 	(if_then_else:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
@@ -16122,6 +16142,33 @@ 
   operands[3] = gen_lowpart (SImode, operands[3]);
 })
 
+;; Don't do conditional moves with memory inputs
+(define_peephole2
+  [(match_scratch:SWI248 2 "r")
+   (set (match_operand:SWI248 0 "register_operand")
+	(if_then_else:SWI248 (match_operator 1 "ix86_comparison_operator"
+			       [(reg FLAGS_REG) (const_int 0)])
+	  (match_dup 0)
+	  (match_operand:SWI248 3 "memory_operand")))]
+  "TARGET_CMOVE && TARGET_AVOID_MEM_OPND_FOR_CMOVE
+   && optimize_insn_for_speed_p ()"
+  [(set (match_dup 2) (match_dup 3))
+   (set (match_dup 0)
+	(if_then_else:SWI248 (match_dup 1) (match_dup 0) (match_dup 2)))])
+
+(define_peephole2
+  [(match_scratch:SWI248 2 "r")
+   (set (match_operand:SWI248 0 "register_operand")
+	(if_then_else:SWI248 (match_operator 1 "ix86_comparison_operator"
+			       [(reg FLAGS_REG) (const_int 0)])
+	  (match_operand:SWI248 3 "memory_operand")
+	  (match_dup 0)))]
+  "TARGET_CMOVE && TARGET_AVOID_MEM_OPND_FOR_CMOVE
+   && optimize_insn_for_speed_p ()"
+  [(set (match_dup 2) (match_dup 3))
+   (set (match_dup 0)
+	(if_then_else:SWI248 (match_dup 1) (match_dup 2) (match_dup 0)))])
+
 (define_expand "mov<mode>cc"
   [(set (match_operand:X87MODEF 0 "register_operand")
 	(if_then_else:X87MODEF
@@ -16209,6 +16256,59 @@ 
   [(set_attr "type" "fcmov,fcmov,icmov,icmov")
    (set_attr "mode" "SF,SF,SI,SI")])
 
+;; Don't do conditional moves with memory inputs.  This splitter helps
+;; register starved x86_32 by forcing inputs into registers before reload.
+(define_split
+  [(set (match_operand:MODEF 0 "register_operand")
+	(if_then_else:MODEF (match_operator 1 "ix86_comparison_operator"
+			      [(reg FLAGS_REG) (const_int 0)])
+	  (match_operand:MODEF 2 "nonimmediate_operand")
+	  (match_operand:MODEF 3 "nonimmediate_operand")))]
+  "!TARGET_64BIT && TARGET_80387 && TARGET_CMOVE
+   && TARGET_AVOID_MEM_OPND_FOR_CMOVE
+   && (MEM_P (operands[2]) || MEM_P (operands[3]))
+   && can_create_pseudo_p ()
+   && optimize_insn_for_speed_p ()"
+  [(set (match_dup 0)
+	(if_then_else:MODEF (match_dup 1) (match_dup 2) (match_dup 3)))]
+{
+  if (MEM_P (operands[2]))
+    operands[2] = force_reg (<MODE>mode, operands[2]);
+  if (MEM_P (operands[3]))
+    operands[3] = force_reg (<MODE>mode, operands[3]);
+})
+
+;; Don't do conditional moves with memory inputs
+(define_peephole2
+  [(match_scratch:MODEF 2 "r")
+   (set (match_operand:MODEF 0 "register_and_not_any_fp_reg_operand")
+	(if_then_else:MODEF (match_operator 1 "fcmov_comparison_operator"
+			      [(reg FLAGS_REG) (const_int 0)])
+	  (match_dup 0)
+	  (match_operand:MODEF 3 "memory_operand")))]
+  "(<MODE>mode != DFmode || TARGET_64BIT)
+   && TARGET_80387 && TARGET_CMOVE
+   && TARGET_AVOID_MEM_OPND_FOR_CMOVE
+   && optimize_insn_for_speed_p ()"
+  [(set (match_dup 2) (match_dup 3))
+   (set (match_dup 0)
+	(if_then_else:MODEF (match_dup 1) (match_dup 0) (match_dup 2)))])
+
+(define_peephole2
+  [(match_scratch:MODEF 2 "r")
+   (set (match_operand:MODEF 0 "register_and_not_any_fp_reg_operand")
+	(if_then_else:MODEF (match_operator 1 "fcmov_comparison_operator"
+			      [(reg FLAGS_REG) (const_int 0)])
+	  (match_operand:MODEF 3 "memory_operand")
+	  (match_dup 0)))]
+  "(<MODE>mode != DFmode || TARGET_64BIT)
+   && TARGET_80387 && TARGET_CMOVE
+   && TARGET_AVOID_MEM_OPND_FOR_CMOVE
+   && optimize_insn_for_speed_p ()"
+  [(set (match_dup 2) (match_dup 3))
+   (set (match_dup 0)
+	(if_then_else:MODEF (match_dup 1) (match_dup 2) (match_dup 0)))])
+
 ;; All moves in XOP pcmov instructions are 128 bits and hence we restrict
 ;; the scalar versions to have only XMM registers as operands.