
[AArch64] Inline mempcpy again

Message ID AM5PR0802MB2610223EAFFF5D9F3CC5378E83D20@AM5PR0802MB2610.eurprd08.prod.outlook.com
State New

Commit Message

Wilco Dijkstra June 29, 2017, 4:20 p.m. UTC
Recent changes removed the generic mempcpy inline.  Given GCC still
doesn't optimize mempcpy (PR70140), I am adding it again.  Since
string/string.h no longer includes an architecture-specific header, do this
inside include/string.h and for now only on AArch64.

OK for commit?

ChangeLog: 
2017-06-29  Wilco Dijkstra  <wdijkstr@arm.com>

        * include/string.h (mempcpy): Redirect to __mempcpy_inline.
        (__mempcpy): Likewise.
        (__mempcpy_inline): New inline function.
        * sysdeps/aarch64/string_private.h: Define _INLINE_mempcpy.

--

Comments

Wilco Dijkstra June 29, 2018, 2:48 p.m. UTC | #1
ping



From: Wilco Dijkstra
Sent: 29 June 2017 17:20
To: libc-alpha@sourceware.org
Cc: nd
Subject: [PATCH][AArch64] Inline mempcpy again
  

Recent changes removed the generic mempcpy inline.  Given GCC still
doesn't optimize mempcpy (PR70140), I am adding it again.  Since
string/string.h no longer includes an architecture-specific header, do this
inside include/string.h and for now only on AArch64.

OK for commit?

ChangeLog: 
2017-06-29  Wilco Dijkstra  <wdijkstr@arm.com>

        * include/string.h (mempcpy): Redirect to __mempcpy_inline.
        (__mempcpy): Likewise.
        (__mempcpy_inline): New inline function.
        * sysdeps/aarch64/string_private.h: Define _INLINE_mempcpy.

--
diff --git a/include/string.h b/include/string.h
index 069efd0b87010e5fdb64c87ced7af1dc4f54f232..46b90b8f346149f075fad026e562dfb27b658969 100644
--- a/include/string.h
+++ b/include/string.h
@@ -197,4 +197,23 @@ extern char *__strncat_chk (char *__restrict __dest,
                             size_t __len, size_t __destlen) __THROW;
 #endif
 
+#if defined __USE_GNU && defined __OPTIMIZE__ \
+    && defined __extern_always_inline && __GNUC_PREREQ (3,2) \
+    && defined _INLINE_mempcpy
+
+#undef mempcpy
+#undef __mempcpy
+
+#define mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
+#define __mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
+
+__extern_always_inline void *
+__mempcpy_inline (void *__restrict __dest,
+                 const void *__restrict __src, size_t __n)
+{
+  return (char *) memcpy (__dest, __src, __n) + __n;
+}
+
+#endif
+
 #endif
diff --git a/sysdeps/aarch64/string_private.h b/sysdeps/aarch64/string_private.h
index 09dedbf3db40cf06077a44af992b399a6b37b48d..8b8fdddcc17a3f69455e72efe9c3616d2d33abe2 100644
--- a/sysdeps/aarch64/string_private.h
+++ b/sysdeps/aarch64/string_private.h
@@ -18,3 +18,6 @@
 
 /* AArch64 implementations support efficient unaligned access.  */
 #define _STRING_ARCH_unaligned 1
+
+/* Inline mempcpy since GCC doesn't optimize it (PR70140).  */
+#define _INLINE_mempcpy 1
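
For illustration, a minimal sketch (not part of the patch) of what the
redirect means for a caller inside glibc (include/string.h is glibc's
internal wrapper header); the function name copy_buf is hypothetical:

#define _GNU_SOURCE 1
#include <string.h>

/* With _INLINE_mempcpy in effect, the mempcpy macro above rewrites this
   call to __mempcpy_inline, i.e. (char *) memcpy (dest, src, n) + n,
   so GCC can expand its memcpy builtin instead of emitting a call to
   the out-of-line mempcpy.  */
void *
copy_buf (void *dest, const void *src, size_t n)
{
  return mempcpy (dest, src, n);
}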
Adhemerval Zanella Netto July 5, 2018, 1:03 p.m. UTC | #2
On 29/06/2017 13:20, Wilco Dijkstra wrote:
> Recent changes removed the generic mempcpy inline.  Given GCC still
> doesn't optimize mempcpy (PR70140), I am adding it again.  Since
> string/string.h no longer includes an architecture-specific header, do this
> inside include/string.h and for now only on AArch64.

Should we reopen PR70140 then? Its current RESOLVED/FIXED state
indicates recent gcc does not show the issue.  I also noted some
discussion on PR81657, which is also set as RESOLVED/FIXED.

> 
> OK for commit?
> 
> ChangeLog: 
> 2017-06-29  Wilco Dijkstra  <wdijkstr@arm.com>
> 
>         * include/string.h (mempcpy): Redirect to __mempcpy_inline.
>         (__mempcpy): Likewise.
>         (__mempcpy_inline): New inline function.
>         * sysdeps/aarch64/string_private.h: Define _INLINE_mempcpy.

We removed it because the consensus is that we do not want this kind of
optimization to be provided by libc anymore; adding this exception
could bring back the previous state, with multiple architectures
providing their own string.h/string_private.h hacks.  I see adding
this patch as a step back, and I hardly think it is an optimization
which yields a large enough performance improvement to justify an
exception.

If optimizing mempcpy is really required, I think a better option would
be to provide an optimized version based on the current memcpy/memmove.
I have created an implementation [1] which provides the expected
optimized mempcpy at the cost of only one extra 'mov' instruction on
both memcpy and memmove (so it can reuse the same memcpy/memmove code).

[1] https://sourceware.org/git/?p=glibc.git;a=shortlog;h=refs/heads/azanella/aarch64-mempcpy
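
For illustration, a C sketch of the idea in [1] (the branch implements
it directly in the AArch64 assembly memcpy; my_mempcpy is a hypothetical
name): the return value dest + n is computed up front, so the copy path
itself is shared unchanged with memcpy:

#define _GNU_SOURCE 1
#include <string.h>

/* Hypothetical sketch: materialize the mempcpy return value before the
   copy (the single extra 'mov' on AArch64) and then run the unmodified
   memcpy code.  */
void *
my_mempcpy (void *dest, const void *src, size_t n)
{
  void *ret = (char *) dest + n;   /* the one extra instruction */
  memcpy (dest, src, n);           /* shared memcpy/memmove path */
  return ret;
}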

> 
> --
> diff --git a/include/string.h b/include/string.h
> index 069efd0b87010e5fdb64c87ced7af1dc4f54f232..46b90b8f346149f075fad026e562dfb27b658969 100644
> --- a/include/string.h
> +++ b/include/string.h
> @@ -197,4 +197,23 @@ extern char *__strncat_chk (char *__restrict __dest,
>  			    size_t __len, size_t __destlen) __THROW;
>  #endif
>  
> +#if defined __USE_GNU && defined __OPTIMIZE__ \
> +    && defined __extern_always_inline && __GNUC_PREREQ (3,2) \
> +    && defined _INLINE_mempcpy
> +
> +#undef mempcpy
> +#undef __mempcpy
> +
> +#define mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
> +#define __mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
> +
> +__extern_always_inline void *
> +__mempcpy_inline (void *__restrict __dest,
> +		  const void *__restrict __src, size_t __n)
> +{
> +  return (char *) memcpy (__dest, __src, __n) + __n;
> +}
> +
> +#endif
> +
>  #endif
> diff --git a/sysdeps/aarch64/string_private.h b/sysdeps/aarch64/string_private.h
> index 09dedbf3db40cf06077a44af992b399a6b37b48d..8b8fdddcc17a3f69455e72efe9c3616d2d33abe2 100644
> --- a/sysdeps/aarch64/string_private.h
> +++ b/sysdeps/aarch64/string_private.h
> @@ -18,3 +18,6 @@
>  
>  /* AArch64 implementations support efficient unaligned access.  */
>  #define _STRING_ARCH_unaligned 1
> +
> +/* Inline mempcpy since GCC doesn't optimize it (PR70140).  */
> +#define _INLINE_mempcpy 1
>
Siddhesh Poyarekar July 6, 2018, 5:34 a.m. UTC | #3
On 07/05/2018 06:33 PM, Adhemerval Zanella wrote:
> If optimizing mempcpy is really required, I think a better option would
> be to provide an optimized version based on the current memcpy/memmove.
> I have created an implementation [1] which provides the expected
> optimized mempcpy at the cost of only one extra 'mov' instruction on
> both memcpy and memmove (so it can reuse the same memcpy/memmove code).
> 
> [1] https://sourceware.org/git/?p=glibc.git;a=shortlog;h=refs/heads/azanella/aarch64-mempcpy

I had proposed the exact same thing for __memcpy_chk[1] for aarch64, 
which was rejected under the pretext that this should be handled 
completely by gcc.  If that consensus has changed then I'd like to 
propose that patch again as well.

However, I do understand that this is much better off being fixed in gcc 
so we should probably try and understand the limitations of doing that 
first.  Wilco, does anything prevent gcc from doing this optimization 
for mempcpy or __memcpy_chk?

Siddhesh

[1] 
http://sourceware-org.1504.n7.nabble.com/PATCH-0-2-Multiarch-hooks-for-memcpy-variants-td463236.html
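
As an aside, a quick way to see what a given GCC does here is to compile
a trivial caller at -O2 and inspect the assembly (check_fold is a
hypothetical name; building the memcpy equivalent with
-D_FORTIFY_SOURCE=2 exercises __memcpy_chk the same way):

#define _GNU_SOURCE 1
#include <string.h>

/* Compile with: gcc -O2 -S check.c
   If the output still contains a call to mempcpy rather than an inline
   copy or a call to memcpy, the builtin is not being expanded (the
   behaviour reported in PR70140).  */
void *
check_fold (void *dest, const void *src, size_t n)
{
  return mempcpy (dest, src, n);
}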

>>
>> --
>> diff --git a/include/string.h b/include/string.h
>> index 069efd0b87010e5fdb64c87ced7af1dc4f54f232..46b90b8f346149f075fad026e562dfb27b658969 100644
>> --- a/include/string.h
>> +++ b/include/string.h
>> @@ -197,4 +197,23 @@ extern char *__strncat_chk (char *__restrict __dest,
>>   			    size_t __len, size_t __destlen) __THROW;
>>   #endif
>>   
>> +#if defined __USE_GNU && defined __OPTIMIZE__ \
>> +    && defined __extern_always_inline && __GNUC_PREREQ (3,2) \
>> +    && defined _INLINE_mempcpy
>> +
>> +#undef mempcpy
>> +#undef __mempcpy
>> +
>> +#define mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
>> +#define __mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
>> +
>> +__extern_always_inline void *
>> +__mempcpy_inline (void *__restrict __dest,
>> +		  const void *__restrict __src, size_t __n)
>> +{
>> +  return (char *) memcpy (__dest, __src, __n) + __n;
>> +}
>> +
>> +#endif
>> +
>>   #endif
>> diff --git a/sysdeps/aarch64/string_private.h b/sysdeps/aarch64/string_private.h
>> index 09dedbf3db40cf06077a44af992b399a6b37b48d..8b8fdddcc17a3f69455e72efe9c3616d2d33abe2 100644
>> --- a/sysdeps/aarch64/string_private.h
>> +++ b/sysdeps/aarch64/string_private.h
>> @@ -18,3 +18,6 @@
>>   
>>   /* AArch64 implementations support efficient unaligned access.  */
>>   #define _STRING_ARCH_unaligned 1
>> +
>> +/* Inline mempcpy since GCC doesn't optimize it (PR70140).  */
>> +#define _INLINE_mempcpy 1
>>
Adhemerval Zanella Netto July 6, 2018, 12:48 p.m. UTC | #4
On 06/07/2018 02:34, Siddhesh Poyarekar wrote:
> On 07/05/2018 06:33 PM, Adhemerval Zanella wrote:
>> If optimizing mempcpy is really required, I think a better option would
>> be to provide an optimized version based on the current memcpy/memmove.
>> I have created an implementation [1] which provides the expected
>> optimized mempcpy at the cost of only one extra 'mov' instruction on
>> both memcpy and memmove (so it can reuse the same memcpy/memmove code).
>>
>> [1] https://sourceware.org/git/?p=glibc.git;a=shortlog;h=refs/heads/azanella/aarch64-mempcpy
> 
> I had proposed the exact same thing for __memcpy_chk[1] for aarch64, which was rejected under the pretext that this should be handled completely by gcc.  If that consensus has changed then I'd like to propose that patch again as well.
> 
> However, I do understand that this is much better off being fixed in gcc so we should probably try and understand the limitations of doing that first.  Wilco, does anything prevent gcc from doing this optimization for mempcpy or __memcpy_chk?
> 
> Siddhesh

I tend to agree it should indeed be handled by the compiler, but
checking the bugzilla reports about the changes in gcc, it seems the
idea is not to make it generic for all platforms, but rather to make it
dependent on the backend plus targeting the libc (that's why PR70140
seems to have been reverted).

In any case, if the idea is indeed to optimize mempcpy and GCC won't get
the required support anytime soon, I still prefer *not* to go back to
adding the code in the string*.h headers.  This patch was just one idea
showing that we can get similar performance directly in the assembly
routines (which has the advantage that, even if the compiler does not
transform mempcpy into memcpy, it will still get some improvement).

> 
> [1] http://sourceware-org.1504.n7.nabble.com/PATCH-0-2-Multiarch-hooks-for-memcpy-variants-td463236.html
> 
>>>
>>> -- 
>>> diff --git a/include/string.h b/include/string.h
>>> index 069efd0b87010e5fdb64c87ced7af1dc4f54f232..46b90b8f346149f075fad026e562dfb27b658969 100644
>>> --- a/include/string.h
>>> +++ b/include/string.h
>>> @@ -197,4 +197,23 @@ extern char *__strncat_chk (char *__restrict __dest,
>>>                   size_t __len, size_t __destlen) __THROW;
>>>   #endif
>>>
>>> +#if defined __USE_GNU && defined __OPTIMIZE__ \
>>> +    && defined __extern_always_inline && __GNUC_PREREQ (3,2) \
>>> +    && defined _INLINE_mempcpy
>>> +
>>> +#undef mempcpy
>>> +#undef __mempcpy
>>> +
>>> +#define mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
>>> +#define __mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
>>> +
>>> +__extern_always_inline void *
>>> +__mempcpy_inline (void *__restrict __dest,
>>> +          const void *__restrict __src, size_t __n)
>>> +{
>>> +  return (char *) memcpy (__dest, __src, __n) + __n;
>>> +}
>>> +
>>> +#endif
>>> +
>>>   #endif
>>> diff --git a/sysdeps/aarch64/string_private.h b/sysdeps/aarch64/string_private.h
>>> index 09dedbf3db40cf06077a44af992b399a6b37b48d..8b8fdddcc17a3f69455e72efe9c3616d2d33abe2 100644
>>> --- a/sysdeps/aarch64/string_private.h
>>> +++ b/sysdeps/aarch64/string_private.h
>>> @@ -18,3 +18,6 @@
>>>
>>>   /* AArch64 implementations support efficient unaligned access.  */
>>>   #define _STRING_ARCH_unaligned 1
>>> +
>>> +/* Inline mempcpy since GCC doesn't optimize it (PR70140).  */
>>> +#define _INLINE_mempcpy 1
>>>

Patch

diff --git a/include/string.h b/include/string.h
index 069efd0b87010e5fdb64c87ced7af1dc4f54f232..46b90b8f346149f075fad026e562dfb27b658969 100644
--- a/include/string.h
+++ b/include/string.h
@@ -197,4 +197,23 @@  extern char *__strncat_chk (char *__restrict __dest,
 			    size_t __len, size_t __destlen) __THROW;
 #endif
 
+#if defined __USE_GNU && defined __OPTIMIZE__ \
+    && defined __extern_always_inline && __GNUC_PREREQ (3,2) \
+    && defined _INLINE_mempcpy
+
+#undef mempcpy
+#undef __mempcpy
+
+#define mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
+#define __mempcpy(dest, src, n) __mempcpy_inline (dest, src, n)
+
+__extern_always_inline void *
+__mempcpy_inline (void *__restrict __dest,
+		  const void *__restrict __src, size_t __n)
+{
+  return (char *) memcpy (__dest, __src, __n) + __n;
+}
+
+#endif
+
 #endif
diff --git a/sysdeps/aarch64/string_private.h b/sysdeps/aarch64/string_private.h
index 09dedbf3db40cf06077a44af992b399a6b37b48d..8b8fdddcc17a3f69455e72efe9c3616d2d33abe2 100644
--- a/sysdeps/aarch64/string_private.h
+++ b/sysdeps/aarch64/string_private.h
@@ -18,3 +18,6 @@ 
 
 /* AArch64 implementations support efficient unaligned access.  */
 #define _STRING_ARCH_unaligned 1
+
+/* Inline mempcpy since GCC doesn't optimize it (PR70140).  */
+#define _INLINE_mempcpy 1