diff mbox series

[AArch64] Improve popcount expansion

Message ID AM5PR0801MB20356A44EFBDBA1F3CA271D983000@AM5PR0801MB2035.eurprd08.prod.outlook.com
State New
Headers show
Series [AArch64] Improve popcount expansion | expand

Commit Message

Wilco Dijkstra Feb. 3, 2020, 3:01 p.m. UTC
The popcount expansion uses umov to extend the result and move it back
to the integer register file.  If we model ADDV as a zero-extending
operation, fmov can be used to move back to the integer side. This
results in a ~0.5% speedup on deepsjeng on Cortex-A57.

A typical __builtin_popcount expansion is now:

	fmov	s0, w0
	cnt	v0.8b, v0.8b
	addv	b0, v0.8b
	fmov	w0, s0

Bootstrap OK, passes regress.

ChangeLog
2020-02-02  Wilco Dijkstra  <wdijkstr@arm.com>

gcc/
	* config/aarch64/aarch64.md (popcount<mode>2): Improve expansion.
	* config/aarch64/aarch64-simd.md
	(aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>): New pattern.
	* config/aarch64/iterators.md (VDQV_E): New iterator.
testsuite/
	* gcc.target/aarch64/popcnt2.c: New test.

--

Comments

Andrew Pinski Feb. 3, 2020, 5:47 p.m. UTC | #1
On Mon, Feb 3, 2020 at 7:02 AM Wilco Dijkstra <Wilco.Dijkstra@arm.com> wrote:
>
> The popcount expansion uses umov to extend the result and move it back
> to the integer register file.  If we model ADDV as a zero-extending
> operation, fmov can be used to move back to the integer side. This
> results in a ~0.5% speedup on deepsjeng on Cortex-A57.
>
> A typical __builtin_popcount expansion is now:
>
>         fmov    s0, w0
>         cnt     v0.8b, v0.8b
>         addv    b0, v0.8b
>         fmov    w0, s0
>
> Bootstrap OK, passes regress.

You might want to add a testcase that tests the autovectorizers too.
Something like this:
unsigned f(unsigned char *a)
{
 unsigned char b = 0;
 for(int i = 0; i < 16; i++)
   b+=a[i];
 return b;
}
--- CUT ---

Currently we also get:
        ldr     q0, [x0]
        addv    b0, v0.16b
        umov    w0, v0.b[0]
        ret

Otherwise LGTM.

Thanks,
Andrew

>
> ChangeLog
> 2020-02-02  Wilco Dijkstra  <wdijkstr@arm.com>
>
> gcc/
>         * config/aarch64/aarch64.md (popcount<mode>2): Improve expansion.
>         * config/aarch64/aarch64-simd.md
>         (aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>): New pattern.
>         * config/aarch64/iterators.md (VDQV_E): New iterator.
> testsuite/
>         * gcc.target/aarch64/popcnt2.c: New test.
>
> --
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 97f46f96968a6bc2f93bbc812931537b819b3b19..34765ff43c1a090a31e2aed64ce95510317ab8c3 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2460,6 +2460,17 @@ (define_insn "aarch64_reduc_plus_internal<mode>"
>    [(set_attr "type" "neon_reduc_add<q>")]
>  )
>
> +;; ADDV with result zero-extended to SI/DImode (for popcount).
> +(define_insn "aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>"
> + [(set (match_operand:GPI 0 "register_operand" "=w")
> +       (zero_extend:GPI
> +       (unspec:<VDQV_E:VEL> [(match_operand:VDQV_E 1 "register_operand" "w")]
> +                            UNSPEC_ADDV)))]
> + "TARGET_SIMD"
> + "add<VDQV_E:vp>\\t%<VDQV_E:Vetype>0, %1.<VDQV_E:Vtype>"
> +  [(set_attr "type" "neon_reduc_add<VDQV_E:q>")]
> +)
> +
>  (define_insn "aarch64_reduc_plus_internalv2si"
>   [(set (match_operand:V2SI 0 "register_operand" "=w")
>         (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 86c2cdfc7973f4b964ba233cfbbe369b24e0ac10..5edc76ee14b55b2b4323530e10bd22b3ffca483e 100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -4829,7 +4829,6 @@ (define_expand "popcount<mode>2"
>  {
>    rtx v = gen_reg_rtx (V8QImode);
>    rtx v1 = gen_reg_rtx (V8QImode);
> -  rtx r = gen_reg_rtx (QImode);
>    rtx in = operands[1];
>    rtx out = operands[0];
>    if(<MODE>mode == SImode)
> @@ -4843,8 +4842,7 @@ (define_expand "popcount<mode>2"
>      }
>    emit_move_insn (v, gen_lowpart (V8QImode, in));
>    emit_insn (gen_popcountv8qi2 (v1, v));
> -  emit_insn (gen_reduc_plus_scal_v8qi (r, v1));
> -  emit_insn (gen_zero_extendqi<mode>2 (out, r));
> +  emit_insn (gen_aarch64_zero_extend<mode>_reduc_plus_v8qi (out, v1));
>    DONE;
>  })
>
> diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
> index fc973086cb91ae0dc54eeeb0b832d522539d7982..926779bf2442fa60d184ef17308f91996d6e8d1b 100644
> --- a/gcc/config/aarch64/iterators.md
> +++ b/gcc/config/aarch64/iterators.md
> @@ -208,6 +208,9 @@ (define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
>  ;; Advanced SIMD modes (except V2DI) for Integer reduction across lanes.
>  (define_mode_iterator VDQV_S [V8QI V16QI V4HI V8HI V4SI])
>
> +;; Advanced SIMD modes for Integer reduction across lanes (zero/sign extended).
> +(define_mode_iterator VDQV_E [V8QI V16QI V4HI V8HI])
> +
>  ;; All double integer narrow-able modes.
>  (define_mode_iterator VDN [V4HI V2SI DI])
>
> diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt2.c b/gcc/testsuite/gcc.target/aarch64/popcnt2.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..e321858afa4d6ecb6fc7348f39f6e5c6c0c46147
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/popcnt2.c
> @@ -0,0 +1,21 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2" } */
> +
> +unsigned
> +foo (int x)
> +{
> +  return __builtin_popcount (x);
> +}
> +
> +unsigned long
> +foo1 (int x)
> +{
> +  return __builtin_popcount (x);
> +}
> +
> +/* { dg-final { scan-assembler-not {popcount} } } */
> +/* { dg-final { scan-assembler-times {cnt\t} 2 } } */
> +/* { dg-final { scan-assembler-times {fmov} 4 } } */
> +/* { dg-final { scan-assembler-not {umov} } } */
> +/* { dg-final { scan-assembler-not {uxtw} } } */
> +/* { dg-final { scan-assembler-not {sxtw} } } */
>
Wilco Dijkstra Feb. 4, 2020, 10:34 a.m. UTC | #2
Hi Andrew,

> You might want to add a testcase that the autovectorizers too.
>
> Currently we get also:
>
>        ldr     q0, [x0]
>        addv    b0, v0.16b
>        umov    w0, v0.b[0]
>        ret

My patch doesn't change this case on purpose - there are also many intrinsics 
which generate redundant umovs. That's for a separate patch.

Wilco
Richard Sandiford Feb. 12, 2020, 6:04 p.m. UTC | #3
Wilco Dijkstra <Wilco.Dijkstra@arm.com> writes:
> The popcount expansion uses umov to extend the result and move it back
> to the integer register file.  If we model ADDV as a zero-extending
> operation, fmov can be used to move back to the integer side. This
> results in a ~0.5% speedup on deepsjeng on Cortex-A57.
>
> A typical __builtin_popcount expansion is now:
>
> fmovs0, w0
> cntv0.8b, v0.8b
> addvb0, v0.8b
> fmovw0, s0
>
> Bootstrap OK, passes regress.
>
> ChangeLog
> 2020-02-02  Wilco Dijkstra  <wdijkstr@arm.com>
>
> gcc/
> * config/aarch64/aarch64.md (popcount<mode>2): Improve expansion.
> * config/aarch64/aarch64-simd.md
> (aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>): New pattern.

I think reordering these and using:

	* config/aarch64/aarch64.md (popcount<mode>2): Use it instead of
	generating separate ADDV and zero_extend patterns.

would be clearer.  OK with that change, thanks.

Richard


> * config/aarch64/iterators.md (VDQV_E): New iterator.
> testsuite/
> * gcc.target/aarch64/popcnt2.c: New test.
>
> --
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 97f46f96968a6bc2f93bbc812931537b819b3b19..34765ff43c1a090a31e2aed64ce95510317ab8c3 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2460,6 +2460,17 @@ (define_insn "aarch64_reduc_plus_internal<mode>"
>    [(set_attr "type" "neon_reduc_add<q>")]
>  )
>
> +;; ADDV with result zero-extended to SI/DImode (for popcount).
> +(define_insn "aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>"
> + [(set (match_operand:GPI 0 "register_operand" "=w")
> +       (zero_extend:GPI
> +(unspec:<VDQV_E:VEL> [(match_operand:VDQV_E 1 "register_operand" "w")]
> +     UNSPEC_ADDV)))]
> + "TARGET_SIMD"
> + "add<VDQV_E:vp>\\t%<VDQV_E:Vetype>0, %1.<VDQV_E:Vtype>"
> +  [(set_attr "type" "neon_reduc_add<VDQV_E:q>")]
> +)
> +
>  (define_insn "aarch64_reduc_plus_internalv2si"
>   [(set (match_operand:V2SI 0 "register_operand" "=w")
>         (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 86c2cdfc7973f4b964ba233cfbbe369b24e0ac10..5edc76ee14b55b2b4323530e10bd22b3ffca483e 100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -4829,7 +4829,6 @@ (define_expand "popcount<mode>2"
>  {
>    rtx v = gen_reg_rtx (V8QImode);
>    rtx v1 = gen_reg_rtx (V8QImode);
> -  rtx r = gen_reg_rtx (QImode);
>    rtx in = operands[1];
>    rtx out = operands[0];
>    if(<MODE>mode == SImode)
> @@ -4843,8 +4842,7 @@ (define_expand "popcount<mode>2"
>      }
>    emit_move_insn (v, gen_lowpart (V8QImode, in));
>    emit_insn (gen_popcountv8qi2 (v1, v));
> -  emit_insn (gen_reduc_plus_scal_v8qi (r, v1));
> -  emit_insn (gen_zero_extendqi<mode>2 (out, r));
> +  emit_insn (gen_aarch64_zero_extend<mode>_reduc_plus_v8qi (out, v1));
>    DONE;
>  })
>
> diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
> index fc973086cb91ae0dc54eeeb0b832d522539d7982..926779bf2442fa60d184ef17308f91996d6e8d1b 100644
> --- a/gcc/config/aarch64/iterators.md
> +++ b/gcc/config/aarch64/iterators.md
> @@ -208,6 +208,9 @@ (define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
>  ;; Advanced SIMD modes (except V2DI) for Integer reduction across lanes.
>  (define_mode_iterator VDQV_S [V8QI V16QI V4HI V8HI V4SI])
>
> +;; Advanced SIMD modes for Integer reduction across lanes (zero/sign extended).
> +(define_mode_iterator VDQV_E [V8QI V16QI V4HI V8HI])
> +
>  ;; All double integer narrow-able modes.
>  (define_mode_iterator VDN [V4HI V2SI DI])
>
> diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt2.c b/gcc/testsuite/gcc.target/aarch64/popcnt2.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..e321858afa4d6ecb6fc7348f39f6e5c6c0c46147
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/popcnt2.c
> @@ -0,0 +1,21 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2" } */
> +
> +unsigned
> +foo (int x)
> +{
> +  return __builtin_popcount (x);
> +}
> +
> +unsigned long
> +foo1 (int x)
> +{
> +  return __builtin_popcount (x);
> +}
> +
> +/* { dg-final { scan-assembler-not {popcount} } } */
> +/* { dg-final { scan-assembler-times {cnt\t} 2 } } */
> +/* { dg-final { scan-assembler-times {fmov} 4 } } */
> +/* { dg-final { scan-assembler-not {umov} } } */
> +/* { dg-final { scan-assembler-not {uxtw} } } */
> +/* { dg-final { scan-assembler-not {sxtw} } } */
diff mbox series

Patch

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 97f46f96968a6bc2f93bbc812931537b819b3b19..34765ff43c1a090a31e2aed64ce95510317ab8c3 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2460,6 +2460,17 @@  (define_insn "aarch64_reduc_plus_internal<mode>"
   [(set_attr "type" "neon_reduc_add<q>")]
 )
 
+;; ADDV with result zero-extended to SI/DImode (for popcount).
+(define_insn "aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=w")
+       (zero_extend:GPI
+	(unspec:<VDQV_E:VEL> [(match_operand:VDQV_E 1 "register_operand" "w")]
+			     UNSPEC_ADDV)))]
+ "TARGET_SIMD"
+ "add<VDQV_E:vp>\\t%<VDQV_E:Vetype>0, %1.<VDQV_E:Vtype>"
+  [(set_attr "type" "neon_reduc_add<VDQV_E:q>")]
+)
+
 (define_insn "aarch64_reduc_plus_internalv2si"
  [(set (match_operand:V2SI 0 "register_operand" "=w")
        (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 86c2cdfc7973f4b964ba233cfbbe369b24e0ac10..5edc76ee14b55b2b4323530e10bd22b3ffca483e 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -4829,7 +4829,6 @@  (define_expand "popcount<mode>2"
 {
   rtx v = gen_reg_rtx (V8QImode);
   rtx v1 = gen_reg_rtx (V8QImode);
-  rtx r = gen_reg_rtx (QImode);
   rtx in = operands[1];
   rtx out = operands[0];
   if(<MODE>mode == SImode)
@@ -4843,8 +4842,7 @@  (define_expand "popcount<mode>2"
     }
   emit_move_insn (v, gen_lowpart (V8QImode, in));
   emit_insn (gen_popcountv8qi2 (v1, v));
-  emit_insn (gen_reduc_plus_scal_v8qi (r, v1));
-  emit_insn (gen_zero_extendqi<mode>2 (out, r));
+  emit_insn (gen_aarch64_zero_extend<mode>_reduc_plus_v8qi (out, v1));
   DONE;
 })
 
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index fc973086cb91ae0dc54eeeb0b832d522539d7982..926779bf2442fa60d184ef17308f91996d6e8d1b 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -208,6 +208,9 @@  (define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
 ;; Advanced SIMD modes (except V2DI) for Integer reduction across lanes.
 (define_mode_iterator VDQV_S [V8QI V16QI V4HI V8HI V4SI])
 
+;; Advanced SIMD modes for Integer reduction across lanes (zero/sign extended).
+(define_mode_iterator VDQV_E [V8QI V16QI V4HI V8HI])
+
 ;; All double integer narrow-able modes.
 (define_mode_iterator VDN [V4HI V2SI DI])
 
diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt2.c b/gcc/testsuite/gcc.target/aarch64/popcnt2.c
new file mode 100644
index 0000000000000000000000000000000000000000..e321858afa4d6ecb6fc7348f39f6e5c6c0c46147
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/popcnt2.c
@@ -0,0 +1,21 @@ 
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+unsigned
+foo (int x)
+{
+  return __builtin_popcount (x);
+}
+
+unsigned long
+foo1 (int x)
+{
+  return __builtin_popcount (x);
+}
+
+/* { dg-final { scan-assembler-not {popcount} } } */
+/* { dg-final { scan-assembler-times {cnt\t} 2 } } */
+/* { dg-final { scan-assembler-times {fmov} 4 } } */
+/* { dg-final { scan-assembler-not {umov} } } */
+/* { dg-final { scan-assembler-not {uxtw} } } */
+/* { dg-final { scan-assembler-not {sxtw} } } */