[v4,1/2] x86: Cleanup bounds checking in large memcpy case

Message ID 20220615174129.620476-1-goldstein.w.n@gmail.com
State New
Series [v4,1/2] x86: Cleanup bounds checking in large memcpy case

Commit Message

Noah Goldstein June 15, 2022, 5:41 p.m. UTC
1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
   Previously was using `__x86_rep_movsb_threshold` and should
   have been using `__x86_shared_non_temporal_threshold`.

2. Avoid reloading __x86_shared_non_temporal_threshold before
   the L(large_memcpy_4x) bounds check.

3. Document the second bounds check for L(large_memcpy_4x)
   more clearly.
---
 .../multiarch/memmove-vec-unaligned-erms.S    | 29 ++++++++++++++-----
 1 file changed, 21 insertions(+), 8 deletions(-)
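
As a rough illustration of the dispatch this patch fixes, here is a
hypothetical, self-contained C sketch (the 3 MiB threshold value and the
pages_alias flag are assumptions for illustration only; the real logic is
the assembly in the patch below):

#include <stdint.h>
#include <stdio.h>

#define LOG_4X_MEMCPY_THRESH 4

/* Assumed value; glibc computes the real threshold at startup.  */
static uint64_t x86_shared_non_temporal_threshold = 3UL << 20;

static const char *
choose_large_copy_path (uint64_t len, int pages_alias)
{
  /* Lower bound: before the patch this compared against the rep-movsb
     threshold instead of the non-temporal threshold.  */
  if (len < x86_shared_non_temporal_threshold)
    return "more_8x_vec";

  /* The threshold is loaded once (r11 in the assembly) and reused for
     the L(large_memcpy_4x) bound.  */
  uint64_t threshold = x86_shared_non_temporal_threshold;
  if (pages_alias || len >= (threshold << LOG_4X_MEMCPY_THRESH))
    return "large_memcpy_4x";
  return "large_memcpy_2x";
}

int
main (void)
{
  printf ("%s\n", choose_large_copy_path (1UL << 20, 0)); /* more_8x_vec */
  printf ("%s\n", choose_large_copy_path (8UL << 20, 0)); /* large_memcpy_2x */
  printf ("%s\n", choose_large_copy_path (1UL << 26, 0)); /* large_memcpy_4x */
  return 0;
}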

Comments

H.J. Lu June 15, 2022, 6:22 p.m. UTC | #1
On Wed, Jun 15, 2022 at 10:41 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
>    Previously was using `__x86_rep_movsb_threshold` and should
>    have been using `__x86_shared_non_temporal_threshold`.
>
> 2. Avoid reloading __x86_shared_non_temporal_threshold before
>    the L(large_memcpy_4x) bounds check.
>
> 3. Document the second bounds check for L(large_memcpy_4x)
>    more clearly.
> ---
>  .../multiarch/memmove-vec-unaligned-erms.S    | 29 ++++++++++++++-----
>  1 file changed, 21 insertions(+), 8 deletions(-)

LGTM.

Thanks.
Sunil Pandey July 14, 2022, 2:57 a.m. UTC | #2
On Wed, Jun 15, 2022 at 11:24 AM H.J. Lu via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> On Wed, Jun 15, 2022 at 10:41 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > 1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
> >    Previously was using `__x86_rep_movsb_threshold` and should
> >    have been using `__x86_shared_non_temporal_threshold`.
> >
> > 2. Avoid reloading __x86_shared_non_temporal_threshold before
> >    the L(large_memcpy_4x) bounds check.
> >
> > 3. Document the second bounds check for L(large_memcpy_4x)
> >    more clearly.
> > ---
> >  .../multiarch/memmove-vec-unaligned-erms.S    | 29 ++++++++++++++-----
> >  1 file changed, 21 insertions(+), 8 deletions(-)
> >
>
> LGTM.
>
> Thanks.
>
> --
> H.J.

I would like to backport this patch to release branches.
Any comments or objections?

--Sunil

Patch

diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index af51177d5d..d1518b8bab 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -118,7 +118,13 @@ 
 # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
 #endif
 
-/* Amount to shift rdx by to compare for memcpy_large_4x.  */
+/* Amount to shift __x86_shared_non_temporal_threshold by for
+   the bound for memcpy_large_4x.  This is essentially used to
+   indicate that the copy is far beyond the scope of L3
+   (assuming no user-configured x86_non_temporal_threshold) and
+   to use a more aggressively unrolled loop.  NB: before
+   increasing the value also update the initialization of
+   x86_non_temporal_threshold.  */
 #ifndef LOG_4X_MEMCPY_THRESH
 # define LOG_4X_MEMCPY_THRESH 4
 #endif
@@ -724,9 +730,14 @@  L(skip_short_movsb_check):
 	.p2align 4,, 10
 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
 L(large_memcpy_2x_check):
-	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
-	jb	L(more_8x_vec_check)
+	/* Entry from L(large_memcpy_2x) has a redundant load of
+	   __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x)
+	   is only used for the non-erms memmove which is generally
+	   less common.  */
 L(large_memcpy_2x):
+	mov	__x86_shared_non_temporal_threshold(%rip), %R11_LP
+	cmp	%R11_LP, %RDX_LP
+	jb	L(more_8x_vec_check)
 	/* To reach this point it is impossible for dst > src and
 	   overlap. Remaining to check is src > dst and overlap. rcx
 	   already contains dst - src. Negate rcx to get src - dst. If
@@ -774,18 +785,21 @@  L(large_memcpy_2x):
 	/* ecx contains -(dst - src). not ecx will return dst - src - 1
 	   which works for testing aliasing.  */
 	notl	%ecx
+	movq	%rdx, %r10
 	testl	$(PAGE_SIZE - VEC_SIZE * 8), %ecx
 	jz	L(large_memcpy_4x)
 
-	movq	%rdx, %r10
-	shrq	$LOG_4X_MEMCPY_THRESH, %r10
-	cmp	__x86_shared_non_temporal_threshold(%rip), %r10
+	/* r11 has __x86_shared_non_temporal_threshold.  Shift it left
+	   by LOG_4X_MEMCPY_THRESH to get the L(large_memcpy_4x)
+	   threshold.  */
+	shlq	$LOG_4X_MEMCPY_THRESH, %r11
+	cmp	%r11, %rdx
 	jae	L(large_memcpy_4x)
 
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 2 - 1), %edx
 	/* r10 stores outer loop counter.  */
-	shrq	$((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
+	shrq	$(LOG_PAGE_SIZE + 1), %r10
 	/* Copy 4x VEC at a time from 2 pages.  */
 	.p2align 4
 L(loop_large_memcpy_2x_outer):
@@ -850,7 +864,6 @@  L(large_memcpy_2x_end):
 
 	.p2align 4
 L(large_memcpy_4x):
-	movq	%rdx, %r10
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 4 - 1), %edx
 	/* r10 stores outer loop counter.  */
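
As a quick cross-check of the shift refactoring (not part of the patch),
the following standalone C sketch, assuming a 3 MiB non-temporal threshold
and LOG_PAGE_SIZE of 12, verifies that the new bound "len >= threshold <<
LOG_4X_MEMCPY_THRESH" matches the old "(len >> LOG_4X_MEMCPY_THRESH) >=
threshold", and that shifting r10 by the full LOG_PAGE_SIZE + 1 compensates
for dropping the early shift:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_4X_MEMCPY_THRESH 4
#define LOG_PAGE_SIZE 12

int
main (void)
{
  /* Assumed non-temporal threshold (3 MiB), for illustration only.  */
  uint64_t threshold = 3UL << 20;

  for (uint64_t len = threshold; len < (1UL << 28); len += 4097)
    {
      /* Old: the size was shifted right before the compare.
         New: the threshold is shifted left instead.  */
      int old_4x = (len >> LOG_4X_MEMCPY_THRESH) >= threshold;
      int new_4x = len >= (threshold << LOG_4X_MEMCPY_THRESH);
      assert (old_4x == new_4x);

      /* Old: r10 already held len >> LOG_4X_MEMCPY_THRESH, so the outer
         loop counter only needed the remaining shift.  New: r10 holds
         len and is shifted by the full LOG_PAGE_SIZE + 1.  */
      uint64_t old_ctr = (len >> LOG_4X_MEMCPY_THRESH)
                         >> ((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH);
      uint64_t new_ctr = len >> (LOG_PAGE_SIZE + 1);
      assert (old_ctr == new_ctr);
    }

  printf ("shift refactoring checks passed\n");
  return 0;
}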