AArch64: Optimize memchr

Message ID: PAWPR08MB89829827FB8D528B37AB2ED783FD9@PAWPR08MB8982.eurprd08.prod.outlook.com
State: New
Series: AArch64: Optimize memchr

Commit Message

Wilco Dijkstra Jan. 12, 2023, 3:56 p.m. UTC
Optimize the main loop - large strings are 40% faster on modern CPUs.
Passes regress.

---
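
For reference, here is a minimal C sketch of the behaviour the main loop
implements (illustrative only, not the glibc code): the bulk of the buffer is
scanned in 32-byte chunks, and the loop exits as soon as any byte matches or
the remaining count runs out.

#include <stddef.h>

/* Conceptual reference only: the assembly below does this with two 16-byte
   NEON compares per iteration and a 64-bit match syndrome.  */
static void *
memchr_ref (const void *s, int c, size_t n)
{
  const unsigned char *p = s;
  unsigned char ch = (unsigned char) c;

  /* Main loop: scan 32 bytes per iteration.  */
  while (n >= 32)
    {
      for (int i = 0; i < 32; i++)
	if (p[i] == ch)
	  return (void *) (p + i);
      p += 32;
      n -= 32;
    }

  /* Tail: handle the remaining 0..31 bytes.  */
  for (size_t i = 0; i < n; i++)
    if (p[i] == ch)
      return (void *) (p + i);
  return NULL;
}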

Comments

Szabolcs Nagy Jan. 13, 2023, 12:27 p.m. UTC | #1
The 01/12/2023 15:56, Wilco Dijkstra wrote:
> Optimize the main loop - large strings are 40% faster on modern CPUs.
> Passes regress.

please commit it, thanks.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>



Patch

diff --git a/sysdeps/aarch64/memchr.S b/sysdeps/aarch64/memchr.S
index 1cd32bf8cffa82d665304d54d2a4d4f75d4ff541..1c99d45fbb506c86a1db5b4d45de49b33d8635c9 100644
--- a/sysdeps/aarch64/memchr.S
+++ b/sysdeps/aarch64/memchr.S
@@ -30,7 +30,6 @@ 
 # define MEMCHR __memchr
 #endif
 
-/* Arguments and results.  */
 #define srcin		x0
 #define chrin		w1
 #define cntin		x2
@@ -73,42 +72,44 @@  ENTRY (MEMCHR)
 
 	rbit	synd, synd
 	clz	synd, synd
-	add	result, srcin, synd, lsr 2
 	cmp	cntin, synd, lsr 2
+	add	result, srcin, synd, lsr 2
 	csel	result, result, xzr, hi
 	ret
 
+	.p2align 3
 L(start_loop):
 	sub	tmp, src, srcin
-	add	tmp, tmp, 16
+	add	tmp, tmp, 17
 	subs	cntrem, cntin, tmp
-	b.ls	L(nomatch)
+	b.lo	L(nomatch)
 
 	/* Make sure that it won't overread by a 16-byte chunk */
-	add	tmp, cntrem, 15
-	tbnz	tmp, 4, L(loop32_2)
-
+	tbz	cntrem, 4, L(loop32_2)
+	sub	src, src, 16
 	.p2align 4
 L(loop32):
-	ldr	qdata, [src, 16]!
+	ldr	qdata, [src, 32]!
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbnz	synd, L(end)
 
 L(loop32_2):
-	ldr	qdata, [src, 16]!
-	subs	cntrem, cntrem, 32
+	ldr	qdata, [src, 16]
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
-	b.ls	L(end)
+	subs	cntrem, cntrem, 32
+	b.lo	L(end_2)
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbz	synd, L(loop32)
+L(end_2):
+	add	src, src, 16
 L(end):
 	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
+	sub	cntrem, src, srcin
 	fmov	synd, dend
-	add	tmp, srcin, cntin
-	sub	cntrem, tmp, src
+	sub	cntrem, cntin, cntrem
 #ifndef __AARCH64EB__
 	rbit	synd, synd
 #endif
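
The early-return path above (rbit/clz on the syndrome, then "lsr 2") and the
"shrn ..., 4" in L(end) rely on the usual AArch64 syndrome trick: the byte-wise
compare result is narrowed to 4 bits per byte in a 64-bit register, so the
index of the first matching byte is the position of the first set bit divided
by four. A minimal C/NEON sketch of the idea (illustrative names, little-endian
assumed, not the glibc code):

#include <arm_neon.h>
#include <stdint.h>

/* Return the index of the first byte equal to C in the 16 bytes at P,
   or -1 if there is none.  Illustrates only the shrn-based syndrome.  */
static int
first_match (const unsigned char *p, unsigned char c)
{
  uint8x16_t data = vld1q_u8 (p);
  uint8x16_t eq = vceqq_u8 (data, vdupq_n_u8 (c));	/* 0xff per matching byte.  */
  /* Shift each 16-bit lane right by 4 and narrow: packs the 16 byte-wise
     results into a 64-bit syndrome with 4 bits per input byte.  */
  uint8x8_t nib = vshrn_n_u16 (vreinterpretq_u16_u8 (eq), 4);
  uint64_t synd = vget_lane_u64 (vreinterpret_u64_u8 (nib), 0);
  if (synd == 0)
    return -1;
  /* rbit+clz in the assembly == count-trailing-zeros here; /4 gives bytes.  */
  return __builtin_ctzll (synd) >> 2;
}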