Patchwork [ARM] Rejig constraint order in *movdf_vfp and *thumb2_movdf_vfp patterns.

login
register
mail settings
Submitter Ramana Radhakrishnan
Date July 27, 2011, 11 a.m.
Message ID <CACUk7=VqWeYCofKWjKZHY2Z+M5kefY5AVLiatmtA9oQA7JqjAA@mail.gmail.com>
Download mbox | patch
Permalink /patch/107027/
State New
Headers show

Comments

Ramana Radhakrishnan - July 27, 2011, 11 a.m.
Hi,

While looking at why we have so many moves between VFP and core
registers, I came across a situation in LU.c from scimark where we
load values into core registers transfer them to the VFP registers and
then use them in VFP multiply instructions. Given that we should
prefer FPmode values in FP registers when TARGET_VFP is true, the
constraint orders ought to be rejigged to allow this. The diff below
shows the difference in code generated for that function after and
before the patch. Looking at *movdf_vfp I can only guess that the
original intention of these patterns was to have the VFP registers
ahead because the type attribute is set to f_loadd and f_stored for
the alternatives but the constraints and everything else were not in
sync with this.



 After patch:                          Before patch:
                                       >  str     sl, [sp, #12]
         ...                                  ...
         fldd    d19, [sl, #0]         |  ldrd    sl, [sl]
         fmuld   d17, d19, d16         |  fmdrr   d18, sl, fp
                                       >  ldr     sl, [sp, #12]
                                       >  fmuld   d17, d18, d16


This is now being tested on v7-a / qemu with arm/thumb multilibs for
arm-linux-gnueabi. Ok to commit ?

cheers
Ramana


2011-07-26  Ramana Radhakrishnan  <ramana.radhakrishnan@linaro.org>

	* config/arm/vfp.md ("*movdf_vfp"): Handle the VFP constraints
	before the core constraints. Adjust attributes.
	("*thumb2_movdf_vfp"): Likewise.
Ramana Radhakrishnan - July 28, 2011, 12:03 p.m.
>  Ok to commit ?

Richard was ok with this offline and the tests showed no regressions. So 
I've committed this today.

Ramana

Patch

diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index e2165a8..3311ae0 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -408,8 +408,8 @@ 
 ;; DFmode moves
 
 (define_insn "*movdf_vfp"
-  [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w  ,Uv,w,r")
-	(match_operand:DF 1 "soft_df_operand"		   " ?r,w,Dy,mF,r,UvF,w, w,r"))]
+  [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,w  ,Uv,r, m,w,r")
+	(match_operand:DF 1 "soft_df_operand"		   " ?r,w,Dy,UvF,w ,mF,r,w,r"))]
   "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
    && (   register_operand (operands[0], DFmode)
        || register_operand (operands[1], DFmode))"
@@ -425,9 +425,9 @@ 
 	gcc_assert (TARGET_VFP_DOUBLE);
         return \"fconstd%?\\t%P0, #%G1\";
       case 3: case 4:
-	return output_move_double (operands);
-      case 5: case 6:
 	return output_move_vfp (operands);
+      case 5: case 6:
+	return output_move_double (operands);
       case 7:
 	if (TARGET_VFP_SINGLE)
 	  return \"fcpys%?\\t%0, %1\;fcpys%?\\t%p0, %p1\";
@@ -442,7 +442,7 @@ 
   "
   [(set_attr "type"
      "r_2_f,f_2_r,fconstd,f_loadd,f_stored,load2,store2,ffarithd,*")
-   (set (attr "length") (cond [(eq_attr "alternative" "3,4,8") (const_int 8)
+   (set (attr "length") (cond [(eq_attr "alternative" "5,6,8") (const_int 8)
 			       (eq_attr "alternative" "7")
 				(if_then_else
 				 (eq (symbol_ref "TARGET_VFP_SINGLE")
@@ -456,8 +456,8 @@ 
 )
 
 (define_insn "*thumb2_movdf_vfp"
-  [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w  ,Uv,w,r")
-	(match_operand:DF 1 "soft_df_operand"		   " ?r,w,Dy,mF,r,UvF,w, w,r"))]
+  [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,w  ,Uv,r ,m,w,r")
+	(match_operand:DF 1 "soft_df_operand"		   " ?r,w,Dy,UvF,w, mF,r, w,r"))]
   "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
   "*
   {
@@ -470,10 +470,10 @@ 
       case 2:
 	gcc_assert (TARGET_VFP_DOUBLE);
 	return \"fconstd%?\\t%P0, #%G1\";
-      case 3: case 4: case 8:
-	return output_move_double (operands);
-      case 5: case 6:
+      case 3: case 4:
 	return output_move_vfp (operands);
+      case 5: case 6: case 8:
+	return output_move_double (operands);
       case 7:
 	if (TARGET_VFP_SINGLE)
 	  return \"fcpys%?\\t%0, %1\;fcpys%?\\t%p0, %p1\";
@@ -485,8 +485,8 @@ 
     }
   "
   [(set_attr "type"
-     "r_2_f,f_2_r,fconstd,load2,store2,f_loadd,f_stored,ffarithd,*")
-   (set (attr "length") (cond [(eq_attr "alternative" "3,4,8") (const_int 8)
+     "r_2_f,f_2_r,fconstd,f_loadd,f_stored,load2,store2,ffarithd,*")
+   (set (attr "length") (cond [(eq_attr "alternative" "5,6,8") (const_int 8)
 			       (eq_attr "alternative" "7")
 				(if_then_else
 				 (eq (symbol_ref "TARGET_VFP_SINGLE")
@@ -494,8 +494,8 @@ 
 				 (const_int 8)
 				 (const_int 4))]
 			      (const_int 4)))
-   (set_attr "pool_range" "*,*,*,4096,*,1020,*,*,*")
-   (set_attr "neg_pool_range" "*,*,*,0,*,1008,*,*,*")]
+   (set_attr "pool_range" "*,*,*,1020,*,4096,*,*,*")
+   (set_attr "neg_pool_range" "*,*,*,1008,*,0,*,*,*")]
 )