
[v1,2/3] x86: Cleanup bounds checking in large memcpy case

Message ID: 20220615002533.1741934-2-goldstein.w.n@gmail.com
State: New
Series: [v1,1/3] x86: Fix misordered logic for setting `rep_movsb_stop_threshold`

Commit Message

Noah Goldstein June 15, 2022, 12:25 a.m. UTC
1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
   Previously was using `__x86_rep_movsb_threshold` and should
   have been using `__x86_shared_non_temporal_threshold`.

2. Avoid reloading __x86_shared_non_temporal_threshold before
   the L(large_memcpy_4x) bounds check.

3. Document the second bounds check for L(large_memcpy_4x)
   more clearly.
---
 manual/tunables.texi                          |  2 +-
 sysdeps/x86/dl-cacheinfo.h                    |  8 +++--
 .../multiarch/memmove-vec-unaligned-erms.S    | 29 ++++++++++++++-----
 3 files changed, 28 insertions(+), 11 deletions(-)
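
In rough C terms, the bounds this patch arrives at look like the sketch below. This is illustrative only: the real logic is the assembly in memmove-vec-unaligned-erms.S, and the helper names and `len` parameter are made up for the example.

  /* Sketch of the large-copy bounds after this patch (illustrative only;
     nt_threshold stands in for __x86_shared_non_temporal_threshold).  */
  #include <stddef.h>
  #include <stdbool.h>

  #define LOG_4X_MEMCPY_THRESH 4

  /* Loaded once from __x86_shared_non_temporal_threshold.  */
  extern size_t nt_threshold;

  static bool
  use_large_memcpy_2x (size_t len)
  {
    /* Point 1: compare against the non-temporal threshold, not
       __x86_rep_movsb_threshold.  */
    return len >= nt_threshold;
  }

  static bool
  use_large_memcpy_4x (size_t len)
  {
    /* Points 2 and 3: reuse the already-loaded threshold and shift it
       left, instead of reloading it and shifting len right.  The tunable
       is capped at 0x0fffffffffffffff so the shift cannot overflow.  */
    return len >= (nt_threshold << LOG_4X_MEMCPY_THRESH);
  }

In the actual code the 4x path is also taken when src and dst alias within the two-page copy scheme, independent of size; the sketch only covers the size-based check.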

Comments

H.J. Lu June 15, 2022, 1:07 a.m. UTC | #1
On Tue, Jun 14, 2022 at 5:25 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
>    Previously was using `__x86_rep_movsb_threshold` and should
>    have been using `__x86_shared_non_temporal_threshold`.
>
> 2. Avoid reloading __x86_shared_non_temporal_threshold before
>    the L(large_memcpy_4x) bounds check.
>
> 3. Document the second bounds check for L(large_memcpy_4x)
>    more clearly.
> ---
>  manual/tunables.texi                          |  2 +-
>  sysdeps/x86/dl-cacheinfo.h                    |  8 +++--
>  .../multiarch/memmove-vec-unaligned-erms.S    | 29 ++++++++++++++-----
>  3 files changed, 28 insertions(+), 11 deletions(-)
>
> diff --git a/manual/tunables.texi b/manual/tunables.texi
> index 1482412078..49daf3eb4a 100644
> --- a/manual/tunables.texi
> +++ b/manual/tunables.texi
> @@ -47,7 +47,7 @@ glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff)
>  glibc.elision.skip_lock_busy: 3 (min: -2147483648, max: 2147483647)
>  glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0xffffffffffffffff)
>  glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff)
> -glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0xffffffffffffffff)
> +glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0x0fffffffffffffff)
>  glibc.cpu.x86_shstk:
>  glibc.cpu.hwcap_mask: 0x6 (min: 0x0, max: 0xffffffffffffffff)
>  glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647)
> diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> index cc3b840f9c..a66152d9cc 100644
> --- a/sysdeps/x86/dl-cacheinfo.h
> +++ b/sysdeps/x86/dl-cacheinfo.h
> @@ -915,9 +915,13 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
>      shared = tunable_size;
>
>    tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> -  /* NB: Ignore the default value 0.  */
> -  if (tunable_size != 0)
> +  /* NB: Ignore the default value 0.  Saturate very large values at
> +     LONG_MAX >> 4.  */
> +  if (tunable_size != 0 && tunable_size <= (LONG_MAX >> 3))
>      non_temporal_threshold = tunable_size;
> +  /* Saturate huge arguments.  */
> +  else if (tunable_size != 0)
> +    non_temporal_threshold = LONG_MAX >> 3;
>
>    tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
>    if (tunable_size > minimum_rep_movsb_threshold)

Please update

 TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
                           0, SIZE_MAX);

instead.
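
For reference, the suggestion is to express the cap through the existing TUNABLE_SET_WITH_BOUNDS registration quoted above rather than clamping the value by hand afterwards. One possible shape of that change is sketched here; the exact maximum used in v2 is not shown in this thread, so the bound below is an assumption (it matches the 0x0fffffffffffffff maximum in the manual/tunables.texi hunk):

  /* Sketch only: bound the tunable so a later shift left by
     LOG_4X_MEMCPY_THRESH (4) cannot overflow.  */
  TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
                           0, SIZE_MAX >> 4);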

> diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> index af51177d5d..d1518b8bab 100644
> --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> @@ -118,7 +118,13 @@
>  # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
>  #endif
>
> -/* Amount to shift rdx by to compare for memcpy_large_4x.  */
> +/* Amount to shift __x86_shared_non_temporal_threshold by for
> +   bound for memcpy_large_4x. This is essentially use to to
> +   indicate that the copy is far beyond the scope of L3
> +   (assuming no user config x86_non_temporal_threshold) and to
> +   use a more aggressively unrolled loop.  NB: before
> +   increasing the value also update initialization of
> +   x86_non_temporal_threshold.  */
>  #ifndef LOG_4X_MEMCPY_THRESH
>  # define LOG_4X_MEMCPY_THRESH 4
>  #endif
> @@ -724,9 +730,14 @@ L(skip_short_movsb_check):
>         .p2align 4,, 10
>  #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
>  L(large_memcpy_2x_check):
> -       cmp     __x86_rep_movsb_threshold(%rip), %RDX_LP
> -       jb      L(more_8x_vec_check)
> +       /* Entry from L(large_memcpy_2x) has a redundant load of
> +          __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x)
> +          is only use for the non-erms memmove which is generally less
> +          common.  */
>  L(large_memcpy_2x):
> +       mov     __x86_shared_non_temporal_threshold(%rip), %R11_LP
> +       cmp     %R11_LP, %RDX_LP
> +       jb      L(more_8x_vec_check)
>         /* To reach this point it is impossible for dst > src and
>            overlap. Remaining to check is src > dst and overlap. rcx
>            already contains dst - src. Negate rcx to get src - dst. If
> @@ -774,18 +785,21 @@ L(large_memcpy_2x):
>         /* ecx contains -(dst - src). not ecx will return dst - src - 1
>            which works for testing aliasing.  */
>         notl    %ecx
> +       movq    %rdx, %r10
>         testl   $(PAGE_SIZE - VEC_SIZE * 8), %ecx
>         jz      L(large_memcpy_4x)
>
> -       movq    %rdx, %r10
> -       shrq    $LOG_4X_MEMCPY_THRESH, %r10
> -       cmp     __x86_shared_non_temporal_threshold(%rip), %r10
> +       /* r11 has __x86_shared_non_temporal_threshold.  Shift it left
> +          by LOG_4X_MEMCPY_THRESH to get L(large_memcpy_4x) threshold.
> +        */
> +       shlq    $LOG_4X_MEMCPY_THRESH, %r11
> +       cmp     %r11, %rdx
>         jae     L(large_memcpy_4x)
>
>         /* edx will store remainder size for copying tail.  */
>         andl    $(PAGE_SIZE * 2 - 1), %edx
>         /* r10 stores outer loop counter.  */
> -       shrq    $((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
> +       shrq    $(LOG_PAGE_SIZE + 1), %r10
>         /* Copy 4x VEC at a time from 2 pages.  */
>         .p2align 4
>  L(loop_large_memcpy_2x_outer):
> @@ -850,7 +864,6 @@ L(large_memcpy_2x_end):
>
>         .p2align 4
>  L(large_memcpy_4x):
> -       movq    %rdx, %r10
>         /* edx will store remainder size for copying tail.  */
>         andl    $(PAGE_SIZE * 4 - 1), %edx
>         /* r10 stores outer loop counter.  */
> --
> 2.34.1
>
Noah Goldstein June 15, 2022, 3:57 a.m. UTC | #2
On Tue, Jun 14, 2022 at 6:08 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Tue, Jun 14, 2022 at 5:25 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > 1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
> >    Previously was using `__x86_rep_movsb_threshold` and should
> >    have been using `__x86_shared_non_temporal_threshold`.
> >
> > 2. Avoid reloading __x86_shared_non_temporal_threshold before
> >    the L(large_memcpy_4x) bounds check.
> >
> > 3. Document the second bounds check for L(large_memcpy_4x)
> >    more clearly.
> > ---
> >  manual/tunables.texi                          |  2 +-
> >  sysdeps/x86/dl-cacheinfo.h                    |  8 +++--
> >  .../multiarch/memmove-vec-unaligned-erms.S    | 29 ++++++++++++++-----
> >  3 files changed, 28 insertions(+), 11 deletions(-)
> >
> > diff --git a/manual/tunables.texi b/manual/tunables.texi
> > index 1482412078..49daf3eb4a 100644
> > --- a/manual/tunables.texi
> > +++ b/manual/tunables.texi
> > @@ -47,7 +47,7 @@ glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff)
> >  glibc.elision.skip_lock_busy: 3 (min: -2147483648, max: 2147483647)
> >  glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0xffffffffffffffff)
> >  glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff)
> > -glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0xffffffffffffffff)
> > +glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0x0fffffffffffffff)
> >  glibc.cpu.x86_shstk:
> >  glibc.cpu.hwcap_mask: 0x6 (min: 0x0, max: 0xffffffffffffffff)
> >  glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647)
> > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > index cc3b840f9c..a66152d9cc 100644
> > --- a/sysdeps/x86/dl-cacheinfo.h
> > +++ b/sysdeps/x86/dl-cacheinfo.h
> > @@ -915,9 +915,13 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> >      shared = tunable_size;
> >
> >    tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> > -  /* NB: Ignore the default value 0.  */
> > -  if (tunable_size != 0)
> > +  /* NB: Ignore the default value 0.  Saturate very large values at
> > +     LONG_MAX >> 4.  */
> > +  if (tunable_size != 0 && tunable_size <= (LONG_MAX >> 3))
> >      non_temporal_threshold = tunable_size;
> > +  /* Saturate huge arguments.  */
> > +  else if (tunable_size != 0)
> > +    non_temporal_threshold = LONG_MAX >> 3;
> >
> >    tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> >    if (tunable_size > minimum_rep_movsb_threshold)
>
> Please update
>
>  TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
>                            0, SIZE_MAX);
>
> instead.

Fixed in V2.
>
> > diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> > index af51177d5d..d1518b8bab 100644
> > --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> > @@ -118,7 +118,13 @@
> >  # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
> >  #endif
> >
> > -/* Amount to shift rdx by to compare for memcpy_large_4x.  */
> > +/* Amount to shift __x86_shared_non_temporal_threshold by for
> > +   bound for memcpy_large_4x. This is essentially use to to
> > +   indicate that the copy is far beyond the scope of L3
> > +   (assuming no user config x86_non_temporal_threshold) and to
> > +   use a more aggressively unrolled loop.  NB: before
> > +   increasing the value also update initialization of
> > +   x86_non_temporal_threshold.  */
> >  #ifndef LOG_4X_MEMCPY_THRESH
> >  # define LOG_4X_MEMCPY_THRESH 4
> >  #endif
> > @@ -724,9 +730,14 @@ L(skip_short_movsb_check):
> >         .p2align 4,, 10
> >  #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
> >  L(large_memcpy_2x_check):
> > -       cmp     __x86_rep_movsb_threshold(%rip), %RDX_LP
> > -       jb      L(more_8x_vec_check)
> > +       /* Entry from L(large_memcpy_2x) has a redundant load of
> > +          __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x)
> > +          is only use for the non-erms memmove which is generally less
> > +          common.  */
> >  L(large_memcpy_2x):
> > +       mov     __x86_shared_non_temporal_threshold(%rip), %R11_LP
> > +       cmp     %R11_LP, %RDX_LP
> > +       jb      L(more_8x_vec_check)
> >         /* To reach this point it is impossible for dst > src and
> >            overlap. Remaining to check is src > dst and overlap. rcx
> >            already contains dst - src. Negate rcx to get src - dst. If
> > @@ -774,18 +785,21 @@ L(large_memcpy_2x):
> >         /* ecx contains -(dst - src). not ecx will return dst - src - 1
> >            which works for testing aliasing.  */
> >         notl    %ecx
> > +       movq    %rdx, %r10
> >         testl   $(PAGE_SIZE - VEC_SIZE * 8), %ecx
> >         jz      L(large_memcpy_4x)
> >
> > -       movq    %rdx, %r10
> > -       shrq    $LOG_4X_MEMCPY_THRESH, %r10
> > -       cmp     __x86_shared_non_temporal_threshold(%rip), %r10
> > +       /* r11 has __x86_shared_non_temporal_threshold.  Shift it left
> > +          by LOG_4X_MEMCPY_THRESH to get L(large_memcpy_4x) threshold.
> > +        */
> > +       shlq    $LOG_4X_MEMCPY_THRESH, %r11
> > +       cmp     %r11, %rdx
> >         jae     L(large_memcpy_4x)
> >
> >         /* edx will store remainder size for copying tail.  */
> >         andl    $(PAGE_SIZE * 2 - 1), %edx
> >         /* r10 stores outer loop counter.  */
> > -       shrq    $((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
> > +       shrq    $(LOG_PAGE_SIZE + 1), %r10
> >         /* Copy 4x VEC at a time from 2 pages.  */
> >         .p2align 4
> >  L(loop_large_memcpy_2x_outer):
> > @@ -850,7 +864,6 @@ L(large_memcpy_2x_end):
> >
> >         .p2align 4
> >  L(large_memcpy_4x):
> > -       movq    %rdx, %r10
> >         /* edx will store remainder size for copying tail.  */
> >         andl    $(PAGE_SIZE * 4 - 1), %edx
> >         /* r10 stores outer loop counter.  */
> > --
> > 2.34.1
> >
>
>
> --
> H.J.
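
Taken together, the memmove changes alter the two large-copy size checks as follows (informal notation: `len` is the copy size in RDX, `nt_threshold` is __x86_shared_non_temporal_threshold):

  Before:  2x taken when  len >= __x86_rep_movsb_threshold
           4x taken when  (len >> LOG_4X_MEMCPY_THRESH) >= nt_threshold
  After:   2x taken when  len >= nt_threshold
           4x taken when  len >= (nt_threshold << LOG_4X_MEMCPY_THRESH)

Shifting the threshold left rather than the length right lets the 4x check reuse the value already loaded into %r11 for the 2x check, and it is why the tunable's maximum drops to 0x0fffffffffffffff: the left shift by 4 must not overflow.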

Patch

diff --git a/manual/tunables.texi b/manual/tunables.texi
index 1482412078..49daf3eb4a 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -47,7 +47,7 @@  glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff)
 glibc.elision.skip_lock_busy: 3 (min: -2147483648, max: 2147483647)
 glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0xffffffffffffffff)
 glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff)
-glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0xffffffffffffffff)
+glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0x0fffffffffffffff)
 glibc.cpu.x86_shstk:
 glibc.cpu.hwcap_mask: 0x6 (min: 0x0, max: 0xffffffffffffffff)
 glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647)
diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
index cc3b840f9c..a66152d9cc 100644
--- a/sysdeps/x86/dl-cacheinfo.h
+++ b/sysdeps/x86/dl-cacheinfo.h
@@ -915,9 +915,13 @@  dl_init_cacheinfo (struct cpu_features *cpu_features)
     shared = tunable_size;
 
   tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
-  /* NB: Ignore the default value 0.  */
-  if (tunable_size != 0)
+  /* NB: Ignore the default value 0.  Saturate very large values at
+     LONG_MAX >> 4.  */
+  if (tunable_size != 0 && tunable_size <= (LONG_MAX >> 3))
     non_temporal_threshold = tunable_size;
+  /* Saturate huge arguments.  */
+  else if (tunable_size != 0)
+    non_temporal_threshold = LONG_MAX >> 3;
 
   tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
   if (tunable_size > minimum_rep_movsb_threshold)
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index af51177d5d..d1518b8bab 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -118,7 +118,13 @@ 
 # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
 #endif
 
-/* Amount to shift rdx by to compare for memcpy_large_4x.  */
+/* Amount to shift __x86_shared_non_temporal_threshold by for
+   bound for memcpy_large_4x. This is essentially use to to
+   indicate that the copy is far beyond the scope of L3
+   (assuming no user config x86_non_temporal_threshold) and to
+   use a more aggressively unrolled loop.  NB: before
+   increasing the value also update initialization of
+   x86_non_temporal_threshold.  */
 #ifndef LOG_4X_MEMCPY_THRESH
 # define LOG_4X_MEMCPY_THRESH 4
 #endif
@@ -724,9 +730,14 @@  L(skip_short_movsb_check):
 	.p2align 4,, 10
 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
 L(large_memcpy_2x_check):
-	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
-	jb	L(more_8x_vec_check)
+	/* Entry from L(large_memcpy_2x) has a redundant load of
+	   __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x)
+	   is only use for the non-erms memmove which is generally less
+	   common.  */
 L(large_memcpy_2x):
+	mov	__x86_shared_non_temporal_threshold(%rip), %R11_LP
+	cmp	%R11_LP, %RDX_LP
+	jb	L(more_8x_vec_check)
 	/* To reach this point it is impossible for dst > src and
 	   overlap. Remaining to check is src > dst and overlap. rcx
 	   already contains dst - src. Negate rcx to get src - dst. If
@@ -774,18 +785,21 @@  L(large_memcpy_2x):
 	/* ecx contains -(dst - src). not ecx will return dst - src - 1
 	   which works for testing aliasing.  */
 	notl	%ecx
+	movq	%rdx, %r10
 	testl	$(PAGE_SIZE - VEC_SIZE * 8), %ecx
 	jz	L(large_memcpy_4x)
 
-	movq	%rdx, %r10
-	shrq	$LOG_4X_MEMCPY_THRESH, %r10
-	cmp	__x86_shared_non_temporal_threshold(%rip), %r10
+	/* r11 has __x86_shared_non_temporal_threshold.  Shift it left
+	   by LOG_4X_MEMCPY_THRESH to get L(large_memcpy_4x) threshold.
+	 */
+	shlq	$LOG_4X_MEMCPY_THRESH, %r11
+	cmp	%r11, %rdx
 	jae	L(large_memcpy_4x)
 
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 2 - 1), %edx
 	/* r10 stores outer loop counter.  */
-	shrq	$((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
+	shrq	$(LOG_PAGE_SIZE + 1), %r10
 	/* Copy 4x VEC at a time from 2 pages.  */
 	.p2align 4
 L(loop_large_memcpy_2x_outer):
@@ -850,7 +864,6 @@  L(large_memcpy_2x_end):
 
 	.p2align 4
 L(large_memcpy_4x):
-	movq	%rdx, %r10
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 4 - 1), %edx
 	/* r10 stores outer loop counter.  */