[v2,aarch64] Correct the maximum shift amount for shifted operands.

Message ID 20181109113824.2923-1-christoph.muellner@theobroma-systems.com
State New
Series [v2,aarch64] Correct the maximum shift amount for shifted operands.

Commit Message

Christoph Muellner Nov. 9, 2018, 11:38 a.m. UTC
From: Christoph Muellner <christoph.muellner@theobroma-systems.com>

The AArch64 ISA specification allows a left shift amount in the range 0 to 4
(encoded in the imm3 field) to be applied after extension.

This is true for at least the following instructions:

 * ADD (extended register)
 * ADDS (extended register)
 * SUB (extended register)

The result of this patch can be seen when compiling the following code:

uint64_t myadd(uint64_t a, uint64_t b)
{
  return a+(((uint8_t)b)<<4);
}

Without the patch the following sequence will be generated:

0000000000000000 <myadd>:
   0:	d37c1c21 	ubfiz	x1, x1, #4, #8
   4:	8b000020 	add	x0, x1, x0
   8:	d65f03c0 	ret

With the patch the ubfiz will be merged into the add instruction:

0000000000000000 <myadd>:
   0:	8b211000 	add	x0, x0, w1, uxtb #4
   4:	d65f03c0 	ret
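
The bound being changed is the guard in aarch64_uxt_size, which (judging
from the context visible in the diff) reports the extension width for an
operand expressed as a mask applied to a shifted register.  A rough,
self-contained sketch of that check follows; only the guard and the loop
header come from the diff, the loop body is a paraphrase and may differ
in detail from the actual GCC source:

/* Return the extension width (8, 16 or 32) if MASK selects exactly that
   many low bits shifted left by SHIFT, i.e. the operand can be written
   as "wN, uxt<b|h|w> #SHIFT"; return 0 otherwise.  The patch raises the
   accepted shift range from 0..3 to 0..4.  */
static int
uxt_size_sketch (int shift, long long mask)
{
  if (shift >= 0 && shift <= 4)
    {
      for (int size = 8; size <= 32; size *= 2)
        {
          long long bits = ((long long) 1 << size) - 1;
          if (mask == (bits << shift))
            return size;
        }
    }
  return 0;
}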

Tested with "make check" and no regressions found.

*** gcc/ChangeLog ***

2018-xx-xx  Christoph Muellner  <christoph.muellner@theobroma-systems.com>
	    Philipp Tomsich  <philipp.tomsich@theobroma-systems.com>

	* config/aarch64/aarch64.c (aarch64_uxt_size): Correct the maximum
	shift amount for shifted operands.

*** gcc/testsuite/ChangeLog ***

2018-xx-xx  Christoph Muellner  <christoph.muellner@theobroma-systems.com>
	    Philipp Tomsich  <philipp.tomsich@theobroma-systems.com>

	* gcc.target/aarch64/extend.c: Adjust the testcases to cover
	the changed shift amount.
---
 gcc/config/aarch64/aarch64.c              |  2 +-
 gcc/testsuite/gcc.target/aarch64/extend.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

Comments

Kyrill Tkachov Nov. 9, 2018, 6:22 p.m. UTC | #1
Hi Christoph,


On 09/11/18 11:38, christoph.muellner@theobroma-systems.com wrote:
> From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
>
> The AArch64 ISA specification allows a left shift amount in the range 0 to 4
> (encoded in the imm3 field) to be applied after extension.
>
> This is true for at least the following instructions:
>
>   * ADD (extended register)
>   * ADDS (extended register)
>   * SUB (extended register)
>
> The result of this patch can be seen when compiling the following code:
>
> uint64_t myadd(uint64_t a, uint64_t b)
> {
>    return a+(((uint8_t)b)<<4);
> }
>
> Without the patch the following sequence will be generated:
>
> 0000000000000000 <myadd>:
>     0:	d37c1c21 	ubfiz	x1, x1, #4, #8
>     4:	8b000020 	add	x0, x1, x0
>     8:	d65f03c0 	ret
>
> With the patch the ubfiz will be merged into the add instruction:
>
> 0000000000000000 <myadd>:
>     0:	8b211000 	add	x0, x0, w1, uxtb #4
>     4:	d65f03c0 	ret
>
> Tested with "make check" and no regressions found.
>
> *** gcc/ChangeLog ***
>
> 2018-xx-xx  Christoph Muellner  <christoph.muellner@theobroma-systems.com>
> 	    Philipp Tomsich  <philipp.tomsich@theobroma-systems.com>
>
> 	* config/aarch64/aarch64.c (aarch64_uxt_size): Correct the maximum
> 	shift amount for shifted operands.
>
> *** gcc/testsuite/ChangeLog ***
>
> 2018-xx-xx  Christoph Muellner  <christoph.muellner@theobroma-systems.com>
> 	    Philipp Tomsich  <philipp.tomsich@theobroma-systems.com>
>
> 	* gcc.target/aarch64/extend.c: Adjust the testcases to cover
> 	the changed shift amount.

Thanks, this looks good to me.
You'll still need a maintainer to approve.

Kyrill


> ---
>   gcc/config/aarch64/aarch64.c              |  2 +-
>   gcc/testsuite/gcc.target/aarch64/extend.c | 16 ++++++++--------
>   2 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
> index c82c7b6..c85988a 100644
> --- a/gcc/config/aarch64/aarch64.c
> +++ b/gcc/config/aarch64/aarch64.c
> @@ -8190,7 +8190,7 @@ aarch64_output_casesi (rtx *operands)
>   int
>   aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
>   {
> -  if (shift >= 0 && shift <= 3)
> +  if (shift >= 0 && shift <= 4)
>       {
>         int size;
>         for (size = 8; size <= 32; size *= 2)
> diff --git a/gcc/testsuite/gcc.target/aarch64/extend.c b/gcc/testsuite/gcc.target/aarch64/extend.c
> index f399e55..7986c5b 100644
> --- a/gcc/testsuite/gcc.target/aarch64/extend.c
> +++ b/gcc/testsuite/gcc.target/aarch64/extend.c
> @@ -32,8 +32,8 @@ ldr_sxtw0 (char *arr, int i)
>   unsigned long long
>   adddi_uxtw (unsigned long long a, unsigned int i)
>   {
> -  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?3" } } */
> -  return a + ((unsigned long long)i << 3);
> +  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?4" } } */
> +  return a + ((unsigned long long)i << 4);
>   }
>   
>   unsigned long long
> @@ -46,8 +46,8 @@ adddi_uxtw0 (unsigned long long a, unsigned int i)
>   long long
>   adddi_sxtw (long long a, int i)
>   {
> -  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?3" } } */
> -  return a + ((long long)i << 3);
> +  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?4" } } */
> +  return a + ((long long)i << 4);
>   }
>   
>   long long
> @@ -60,8 +60,8 @@ adddi_sxtw0 (long long a, int i)
>   unsigned long long
>   subdi_uxtw (unsigned long long a, unsigned int i)
>   {
> -  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?3" } } */
> -  return a - ((unsigned long long)i << 3);
> +  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?4" } } */
> +  return a - ((unsigned long long)i << 4);
>   }
>   
>   unsigned long long
> @@ -74,8 +74,8 @@ subdi_uxtw0 (unsigned long long a, unsigned int i)
>   long long
>   subdi_sxtw (long long a, int i)
>   {
> -  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?3" } } */
> -  return a - ((long long)i << 3);
> +  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?4" } } */
> +  return a - ((long long)i << 4);
>   }
>   
>   long long
Andrew Pinski Nov. 10, 2018, 12:04 a.m. UTC | #2
On Fri, Nov 9, 2018 at 3:39 AM <christoph.muellner@theobroma-systems.com> wrote:
>
> From: Christoph Muellner <christoph.muellner@theobroma-systems.com>
>
> The AArch64 ISA specification allows a left shift amount in the range 0 to 4
> (encoded in the imm3 field) to be applied after extension.
>
> This is true for at least the following instructions:
>
>  * ADD (extended register)
>  * ADDS (extended register)
>  * SUB (extended register)
>
> The result of this patch can be seen when compiling the following code:
>
> uint64_t myadd(uint64_t a, uint64_t b)
> {
>   return a+(((uint8_t)b)<<4);
> }
>
> Without the patch the following sequence will be generated:
>
> 0000000000000000 <myadd>:
>    0:   d37c1c21        ubfiz   x1, x1, #4, #8
>    4:   8b000020        add     x0, x1, x0
>    8:   d65f03c0        ret
>
> With the patch the ubfiz will be merged into the add instruction:
>
> 0000000000000000 <myadd>:
>    0:   8b211000        add     x0, x0, w1, uxtb #4
>    4:   d65f03c0        ret
>
> Tested with "make check" and no regressions found.
>
> *** gcc/ChangeLog ***
>
> 2018-xx-xx  Christoph Muellner  <christoph.muellner@theobroma-systems.com>
>             Philipp Tomsich  <philipp.tomsich@theobroma-systems.com>
>
>         * config/aarch64/aarch64.c (aarch64_uxt_size): Correct the maximum
>         shift amount for shifted operands.
>
> *** gcc/testsuite/ChangeLog ***
>
> 2018-xx-xx  Christoph Muellner  <christoph.muellner@theobroma-systems.com>
>             Philipp Tomsich  <philipp.tomsich@theobroma-systems.com>
>
>         * gcc.target/aarch64/extend.c: Adjust the testcases to cover
>         the changed shift amount.

I think it might be better if we added new testcases instead of
modifying the old ones in this case.  The main reason is that if we
test an older compiler with the newer testsuite (which you can do), we
would get these tests failing.
The main reason for modifying existing testcases is when they were not
testing what they should have been testing ...  This is not one of
those cases.
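
For illustration, a separate test covering the new maximum shift amount
could be added next to the existing ones, leaving those untouched.  This
is only a sketch: the function name here is made up, and the directive is
modelled on the tests already in gcc.target/aarch64/extend.c.

unsigned long long
adddi_uxtw4 (unsigned long long a, unsigned int i)
{
  /* With the patch this should combine into "add x0, x0, w1, uxtw 4".  */
  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?4" } } */
  return a + ((unsigned long long)i << 4);
}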

Thanks,
Andrew Pinski

> ---
>  gcc/config/aarch64/aarch64.c              |  2 +-
>  gcc/testsuite/gcc.target/aarch64/extend.c | 16 ++++++++--------
>  2 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
> index c82c7b6..c85988a 100644
> --- a/gcc/config/aarch64/aarch64.c
> +++ b/gcc/config/aarch64/aarch64.c
> @@ -8190,7 +8190,7 @@ aarch64_output_casesi (rtx *operands)
>  int
>  aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
>  {
> -  if (shift >= 0 && shift <= 3)
> +  if (shift >= 0 && shift <= 4)
>      {
>        int size;
>        for (size = 8; size <= 32; size *= 2)
> diff --git a/gcc/testsuite/gcc.target/aarch64/extend.c b/gcc/testsuite/gcc.target/aarch64/extend.c
> index f399e55..7986c5b 100644
> --- a/gcc/testsuite/gcc.target/aarch64/extend.c
> +++ b/gcc/testsuite/gcc.target/aarch64/extend.c
> @@ -32,8 +32,8 @@ ldr_sxtw0 (char *arr, int i)
>  unsigned long long
>  adddi_uxtw (unsigned long long a, unsigned int i)
>  {
> -  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?3" } } */
> -  return a + ((unsigned long long)i << 3);
> +  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?4" } } */
> +  return a + ((unsigned long long)i << 4);
>  }
>
>  unsigned long long
> @@ -46,8 +46,8 @@ adddi_uxtw0 (unsigned long long a, unsigned int i)
>  long long
>  adddi_sxtw (long long a, int i)
>  {
> -  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?3" } } */
> -  return a + ((long long)i << 3);
> +  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?4" } } */
> +  return a + ((long long)i << 4);
>  }
>
>  long long
> @@ -60,8 +60,8 @@ adddi_sxtw0 (long long a, int i)
>  unsigned long long
>  subdi_uxtw (unsigned long long a, unsigned int i)
>  {
> -  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?3" } } */
> -  return a - ((unsigned long long)i << 3);
> +  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?4" } } */
> +  return a - ((unsigned long long)i << 4);
>  }
>
>  unsigned long long
> @@ -74,8 +74,8 @@ subdi_uxtw0 (unsigned long long a, unsigned int i)
>  long long
>  subdi_sxtw (long long a, int i)
>  {
> -  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?3" } } */
> -  return a - ((long long)i << 3);
> +  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?4" } } */
> +  return a - ((long long)i << 4);
>  }
>
>  long long
> --
> 2.9.5
>

Patch

diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index c82c7b6..c85988a 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -8190,7 +8190,7 @@  aarch64_output_casesi (rtx *operands)
 int
 aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
 {
-  if (shift >= 0 && shift <= 3)
+  if (shift >= 0 && shift <= 4)
     {
       int size;
       for (size = 8; size <= 32; size *= 2)
diff --git a/gcc/testsuite/gcc.target/aarch64/extend.c b/gcc/testsuite/gcc.target/aarch64/extend.c
index f399e55..7986c5b 100644
--- a/gcc/testsuite/gcc.target/aarch64/extend.c
+++ b/gcc/testsuite/gcc.target/aarch64/extend.c
@@ -32,8 +32,8 @@  ldr_sxtw0 (char *arr, int i)
 unsigned long long
 adddi_uxtw (unsigned long long a, unsigned int i)
 {
-  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?3" } } */
-  return a + ((unsigned long long)i << 3);
+  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?4" } } */
+  return a + ((unsigned long long)i << 4);
 }
 
 unsigned long long
@@ -46,8 +46,8 @@  adddi_uxtw0 (unsigned long long a, unsigned int i)
 long long
 adddi_sxtw (long long a, int i)
 {
-  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?3" } } */
-  return a + ((long long)i << 3);
+  /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?4" } } */
+  return a + ((long long)i << 4);
 }
 
 long long
@@ -60,8 +60,8 @@  adddi_sxtw0 (long long a, int i)
 unsigned long long
 subdi_uxtw (unsigned long long a, unsigned int i)
 {
-  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?3" } } */
-  return a - ((unsigned long long)i << 3);
+  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?4" } } */
+  return a - ((unsigned long long)i << 4);
 }
 
 unsigned long long
@@ -74,8 +74,8 @@  subdi_uxtw0 (unsigned long long a, unsigned int i)
 long long
 subdi_sxtw (long long a, int i)
 {
-  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?3" } } */
-  return a - ((long long)i << 3);
+  /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?4" } } */
+  return a - ((long long)i << 4);
 }
 
 long long