
[ARM,2/7] Convert FP mnemonics to UAL | add/sub/div/abs patterns

Message ID 53F36BA4.3050006@arm.com
State New

Commit Message

Kyrylo Tkachov Aug. 19, 2014, 3:22 p.m. UTC
Hi all,

Nothing too controversial here: this converts the affected patterns to UAL.
The size of the data type in the operation is expressed by the .f32 or
.f64 suffix.
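
Concretely, the mnemonics change as follows:

    fabss -> vabs.f32    fabsd -> vabs.f64
    fnegs -> vneg.f32    fnegd -> vneg.f64
    fadds -> vadd.f32    faddd -> vadd.f64
    fsubs -> vsub.f32    fsubd -> vsub.f64
    fdivs -> vdiv.f32    fdivd -> vdiv.f64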

Ok for trunk?

Thanks,
Kyrill

2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

      * config/arm/vfp.md (*abssf2_vfp): Use UAL assembly syntax.
      (*absdf2_vfp): Likewise.
      (*negsf2_vfp): Likewise.
      (*negdf2_vfp): Likewise.
      (*addsf3_vfp): Likewise.
      (*adddf3_vfp): Likewise.
      (*subsf3_vfp): Likewise.
      (*subdf3_vfp): Likewise.
      (*divsf3_vfp): Likewise.
      (*divdf3_vfp): Likewise.

2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

      * gcc.target/arm/vfp-1.c: Update expected assembly.

Comments

Ramana Radhakrishnan Sept. 9, 2014, 11:03 a.m. UTC | #1
On Tue, Aug 19, 2014 at 4:22 PM, Kyrill Tkachov <kyrylo.tkachov@arm.com> wrote:
> Hi all,
>
> Nothing too controversial here: this converts the affected patterns to UAL.
> The size of the data type in the operation is expressed by the .f32 or
> .f64 suffix.
>
> Ok for trunk?

Ok - in an ideal world I'd have just done these with iterators but ok.
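
For illustration, an iterator-based version of, say, the abs pattern could
look roughly like the sketch below. The SDF iterator and the mode attributes
are assumptions made for this sketch, not something taken from the patch; the
per-mode conditions on the iterator are what would restrict the DF variant to
TARGET_VFP_DOUBLE.

;; Sketch only, not part of this patch.  The iterator and attribute names
;; are illustrative: each attribute maps a mode to its register constraint,
;; UAL type suffix, operand-print modifier or scheduling type.
(define_mode_iterator SDF [(SF "TARGET_VFP") (DF "TARGET_VFP_DOUBLE")])
(define_mode_attr F_constraint [(SF "t") (DF "w")])      ;; register constraint
(define_mode_attr V_if_elem    [(SF "f32") (DF "f64")])  ;; UAL type suffix
(define_mode_attr V_reg        [(SF "") (DF "P")])       ;; %P needed for D regs
(define_mode_attr ffarith_type [(SF "ffariths") (DF "ffarithd")])

(define_insn "*abs<mode>2_vfp"
  [(set (match_operand:SDF 0 "s_register_operand" "=<F_constraint>")
	(abs:SDF (match_operand:SDF 1 "s_register_operand" "<F_constraint>")))]
  "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
  "vabs%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1"
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")
   (set_attr "type" "<ffarith_type>")]
)

The two-operand add/sub/div patterns would follow the same shape with a
second input operand.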

Ramana

>
> Thanks,
> Kyrill
>
> 2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
>
>      * config/arm/vfp.md (*abssf2_vfp): Use UAL assembly syntax.
>      (*absdf2_vfp): Likewise.
>      (*negsf2_vfp): Likewise.
>      (*negdf2_vfp): Likewise.
>      (*addsf3_vfp): Likewise.
>      (*adddf3_vfp): Likewise.
>      (*subsf3_vfp): Likewise.
>      (*subdf3_vfp): Likewise.
>      (*divsf3_vfp): Likewise.
>      (*divdf3_vfp): Likewise.
>
> 2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
>
>      * gcc.target/arm/vfp-1.c: Update expected assembly.

Patch

commit 271c9401d309e887bcedcae7743139741f6fe9a2
Author: Kyrylo Tkachov <kyrylo.tkachov@arm.com>
Date:   Thu Jul 17 09:47:15 2014 +0100

    [ARM][2/n] Convert FP mnemonics to UAL | add/sub/div/abs patterns

diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index d25505c..975f5ae 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -588,7 +588,7 @@ 
   [(set (match_operand:SF	  0 "s_register_operand" "=t")
 	(abs:SF (match_operand:SF 1 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fabss%?\\t%0, %1"
+  "vabs%?.f32\\t%0, %1"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "ffariths")]
@@ -598,7 +598,7 @@ 
   [(set (match_operand:DF	  0 "s_register_operand" "=w")
 	(abs:DF (match_operand:DF 1 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fabsd%?\\t%P0, %P1"
+  "vabs%?.f64\\t%P0, %P1"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "ffarithd")]
@@ -609,7 +609,7 @@ 
 	(neg:SF (match_operand:SF 1 "s_register_operand" "t,r")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
   "@
-   fnegs%?\\t%0, %1
+   vneg%?.f32\\t%0, %1
    eor%?\\t%0, %1, #-2147483648"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
@@ -621,7 +621,7 @@ 
 	(neg:DF (match_operand:DF 1 "s_register_operand" "w,0,r")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
   "@
-   fnegd%?\\t%P0, %P1
+   vneg%?.f64\\t%P0, %P1
    #
    #"
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE && reload_completed
@@ -671,7 +671,7 @@ 
 	(plus:SF (match_operand:SF 1 "s_register_operand" "t")
 		 (match_operand:SF 2 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fadds%?\\t%0, %1, %2"
+  "vadd%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fadds")]
@@ -682,7 +682,7 @@ 
 	(plus:DF (match_operand:DF 1 "s_register_operand" "w")
 		 (match_operand:DF 2 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "faddd%?\\t%P0, %P1, %P2"
+  "vadd%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "faddd")]
@@ -694,7 +694,7 @@ 
 	(minus:SF (match_operand:SF 1 "s_register_operand" "t")
 		  (match_operand:SF 2 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fsubs%?\\t%0, %1, %2"
+  "vsub%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fadds")]
@@ -705,7 +705,7 @@ 
 	(minus:DF (match_operand:DF 1 "s_register_operand" "w")
 		  (match_operand:DF 2 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fsubd%?\\t%P0, %P1, %P2"
+  "vsub%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "faddd")]
@@ -722,7 +722,7 @@ 
 	(div:SF (match_operand:SF 1 "s_register_operand" "t,t")
 		(match_operand:SF 2 "s_register_operand" "t,t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fdivs%?\\t%0, %1, %2"
+  "vdiv%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "arch" "*,armv6_or_vfpv3")
@@ -734,7 +734,7 @@ 
 	(div:DF (match_operand:DF 1 "s_register_operand" "w,w")
 		(match_operand:DF 2 "s_register_operand" "w,w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fdivd%?\\t%P0, %P1, %P2"
+  "vdiv%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "arch" "*,armv6_or_vfpv3")
diff --git a/gcc/testsuite/gcc.target/arm/vfp-1.c b/gcc/testsuite/gcc.target/arm/vfp-1.c
index 2355b4d..3027f10 100644
--- a/gcc/testsuite/gcc.target/arm/vfp-1.c
+++ b/gcc/testsuite/gcc.target/arm/vfp-1.c
@@ -11,19 +11,19 @@  volatile float f1, f2, f3;
 
 void test_sf() {
   /* abssf2_vfp */
-  /* { dg-final { scan-assembler "fabss" } } */
+  /* { dg-final { scan-assembler "vabs.f32" } } */
   f1 = fabsf (f1);
   /* negsf2_vfp */
-  /* { dg-final { scan-assembler "fnegs" } } */
+  /* { dg-final { scan-assembler "vneg.f32" } } */
   f1 = -f1;
   /* addsf3_vfp */
-  /* { dg-final { scan-assembler "fadds" } } */
+  /* { dg-final { scan-assembler "vadd.f32" } } */
   f1 = f2 + f3;
   /* subsf3_vfp */
-  /* { dg-final { scan-assembler "fsubs" } } */
+  /* { dg-final { scan-assembler "vsub.f32" } } */
   f1 = f2 - f3;
   /* divsf3_vfp */
-  /* { dg-final { scan-assembler "fdivs" } } */
+  /* { dg-final { scan-assembler "vdiv.f32" } } */
   f1 = f2 / f3;
   /* mulsf3_vfp */
   /* { dg-final { scan-assembler "fmuls" } } */
@@ -52,19 +52,19 @@  volatile double d1, d2, d3;
 
 void test_df() {
   /* absdf2_vfp */
-  /* { dg-final { scan-assembler "fabsd" } } */
+  /* { dg-final { scan-assembler "vabs.f64" } } */
   d1 = fabs (d1);
   /* negdf2_vfp */
-  /* { dg-final { scan-assembler "fnegd" } } */
+  /* { dg-final { scan-assembler "vneg.f64" } } */
   d1 = -d1;
   /* adddf3_vfp */
-  /* { dg-final { scan-assembler "faddd" } } */
+  /* { dg-final { scan-assembler "vadd.f64" } } */
   d1 = d2 + d3;
   /* subdf3_vfp */
-  /* { dg-final { scan-assembler "fsubd" } } */
+  /* { dg-final { scan-assembler "vsub.f64" } } */
   d1 = d2 - d3;
   /* divdf3_vfp */
-  /* { dg-final { scan-assembler "fdivd" } } */
+  /* { dg-final { scan-assembler "vdiv.f64" } } */
   d1 = d2 / d3;
   /* muldf3_vfp */
   /* { dg-final { scan-assembler "fmuld" } } */