i386: Fix up _mm_loadu_si{16,32} [PR99754]

Message ID Yiz0cP7Ta5bSNKzG@tucnak
State New
Series i386: Fix up _mm_loadu_si{16,32} [PR99754]

Commit Message

Jakub Jelinek March 12, 2022, 7:28 p.m. UTC
Hi!

These intrinsics are supposed to do an unaligned may_alias load
of a 16-bit or 32-bit value and store it as the first element of
a 128-bit integer vector, with all other elements cleared.
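
For illustration, here is the intended behavior in use (a minimal
sketch, not part of the patch; the buffer contents are made up):

#include <emmintrin.h>
#include <stdio.h>

int
main (void)
{
  /* A 32-bit value at an odd byte offset inside a char buffer:
     misaligned, and accessed through an aliasing pointer.  */
  unsigned char buf[8] = { 0, 0xef, 0xbe, 0xad, 0xde, 0, 0, 0 };
  __m128i v = _mm_loadu_si32 (&buf[1]);
  unsigned int out[4];
  _mm_storeu_si128 ((__m128i_u *) out, v);
  /* With the fixed intrinsic this prints
     deadbeef 00000000 00000000 00000000
     (the buggy version put the value in the last element instead).  */
  printf ("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
  return 0;
}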

The current _mm_storeu_* implementations get this right: they use the
__*_u types to do the store and extract the first element of the
vector into it.
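
(For reference, the __*_u types are may_alias, alignment-1 vector
typedefs; paraphrasing their definitions from the GCC headers:)

typedef short __m16_u __attribute__ ((__vector_size__ (2), __may_alias__, __aligned__ (1)));
typedef int __m32_u __attribute__ ((__vector_size__ (4), __may_alias__, __aligned__ (1)));
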
But _mm_loadu_si{16,32} gets it all wrong.  It performs an aligned
non-may_alias load and, because _mm_set_epi{16,32} takes its arguments
with the highest element first, it also inserts the loaded value into
the last vector element instead of the first.
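
The argument-order pitfall in a nutshell (an illustrative snippet, not
from the patch):

#include <emmintrin.h>

/* _mm_set_epi32 takes its arguments highest element first:
   _mm_set_epi32 (e3, e2, e1, e0).  */
__m128i
wrong (int x)
{
  return _mm_set_epi32 (x, 0, 0, 0);	/* x ends up in element 3.  */
}

__m128i
fixed (int x)
{
  return _mm_set_epi32 (0, 0, 0, x);	/* x ends up in element 0.  */
}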

The following patch fixes that, bootstrapped/regtested on x86_64-linux
and i686-linux, ok for trunk and affected release branches?

Note, while the Intrinsics guide says SSE2 for _mm_loadu_si32, it
strangely says SSE for _mm_loadu_si16.  But the intrinsic returns
__m128i, which is only defined in emmintrin.h, and _mm_set_epi16 is
also SSE2 and later, in emmintrin.h.  Even clang defines it in
emmintrin.h and ends up with an inlining failure when calling
_mm_loadu_si16 from an sse,no-sse2 function.
So, isn't that a bug in the intrinsics guide instead?
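
(The clang failure can be reproduced with something along these lines;
this is a hypothetical testcase, and the diagnostic wording is
paraphrased from memory:)

#include <emmintrin.h>

/* clang's _mm_loadu_si16 is an always_inline emmintrin.h function that
   requires sse2, so inlining it into a no-sse2 function fails with an
   error like "always_inline function '_mm_loadu_si16' requires target
   feature 'sse2'".  */
__attribute__ ((target ("sse,no-sse2")))
__m128i
load16 (void const *p)
{
  return _mm_loadu_si16 (p);
}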

2022-03-12  Jakub Jelinek  <jakub@redhat.com>

	PR target/99754
	* config/i386/emmintrin.h (_mm_loadu_si32): Put loaded value into
	first rather than last element of the vector, use __m32_u to do
	a really unaligned load, use just 0 instead of (int)0.
	(_mm_loadu_si16): Put loaded value into first rather than last
	element of the vector, use __m16_u to do a really unaligned load,
	use just 0 instead of (short)0.

	* gcc.target/i386/pr99754-1.c: New test.
	* gcc.target/i386/pr99754-2.c: New test.


	Jakub

Comments

Hongtao Liu March 13, 2022, 1:34 p.m. UTC | #1
On Sun, Mar 13, 2022 at 3:28 AM Jakub Jelinek <jakub@redhat.com> wrote:
>
> Hi!
>
> These intrinsics are supposed to do an unaligned may_alias load
> of a 16-bit or 32-bit value and store it as the first element of
> a 128-bit integer vector, with all other elements cleared.
>
> The current _mm_storeu_* implementations get this right: they use the
> __*_u types to do the store and extract the first element of the
> vector into it.
> But _mm_loadu_si{16,32} gets it all wrong.  It performs an aligned
> non-may_alias load and, because _mm_set_epi{16,32} takes its arguments
> with the highest element first, it also inserts the loaded value into
> the last vector element instead of the first.
>
> The following patch fixes that, bootstrapped/regtested on x86_64-linux
> and i686-linux, ok for trunk and affected release branches?
LGTM, thanks for handling this.
>
> Note, while the Intrinsics guide says SSE2 for _mm_loadu_si32, it
> strangely says SSE for _mm_loadu_si16.  But the intrinsic returns
> __m128i, which is only defined in emmintrin.h, and _mm_set_epi16 is
> also SSE2 and later, in emmintrin.h.  Even clang defines it in
> emmintrin.h and ends up with an inlining failure when calling
> _mm_loadu_si16 from an sse,no-sse2 function.
> So, isn't that a bug in the intrinsics guide instead?
I think it's a bug, it's supposed to generate movzx + movd, and movd
is under sse2.  I have reported it to the colleague who maintains the
Intel intrinsics guide.

A similar bug exists for
_mm_loadu_si64
_mm_storeu_si16
_mm_storeu_si64

>
> 2022-03-12  Jakub Jelinek  <jakub@redhat.com>
>
>         PR target/99754
>         * config/i386/emmintrin.h (_mm_loadu_si32): Put loaded value into
>         first rather than last element of the vector, use __m32_u to do
>         a really unaligned load, use just 0 instead of (int)0.
>         (_mm_loadu_si16): Put loaded value into first rather than last
>         element of the vector, use __m16_u to do a really unaligned load,
>         use just 0 instead of (short)0.
>
>         * gcc.target/i386/pr99754-1.c: New test.
>         * gcc.target/i386/pr99754-2.c: New test.
>
> --- gcc/config/i386/emmintrin.h.jj      2022-01-11 23:11:21.766298923 +0100
> +++ gcc/config/i386/emmintrin.h 2022-03-11 16:47:24.789884825 +0100
> @@ -718,14 +718,13 @@ _mm_loadu_si64 (void const *__P)
>  extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>  _mm_loadu_si32 (void const *__P)
>  {
> -  return _mm_set_epi32 (*(int *)__P, (int)0, (int)0, (int)0);
> +  return _mm_set_epi32 (0, 0, 0, (*(__m32_u *)__P)[0]);
>  }
>
>  extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>  _mm_loadu_si16 (void const *__P)
>  {
> -  return _mm_set_epi16 (*(short *)__P, (short)0, (short)0, (short)0,
> -                       (short)0, (short)0, (short)0, (short)0);
> +  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]);
>  }
>
>  extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
> --- gcc/testsuite/gcc.target/i386/pr99754-1.c.jj        2022-03-11 16:43:30.621120896 +0100
> +++ gcc/testsuite/gcc.target/i386/pr99754-1.c   2022-03-11 16:43:18.250291856 +0100
> @@ -0,0 +1,20 @@
> +/* PR target/99754 */
> +/* { dg-do run } */
> +/* { dg-options "-O2 -msse2" } */
> +/* { dg-require-effective-target sse2 } */
> +
> +#include "sse2-check.h"
> +#include <emmintrin.h>
> +
> +static void
> +sse2_test (void)
> +{
> +  union { unsigned char buf[32]; long long ll; } u;
> +  u.buf[1] = 0xfe;
> +  u.buf[2] = 0xca;
> +  u.buf[17] = 0xaa;
> +  u.buf[18] = 0x55;
> +  _mm_storeu_si16 (&u.buf[17], _mm_loadu_si16 (&u.buf[1]));
> +  if (u.buf[17] != 0xfe || u.buf[18] != 0xca)
> +    abort ();
> +}
> --- gcc/testsuite/gcc.target/i386/pr99754-2.c.jj        2022-03-11 16:43:41.701967763 +0100
> +++ gcc/testsuite/gcc.target/i386/pr99754-2.c   2022-03-11 16:45:16.391659211 +0100
> @@ -0,0 +1,24 @@
> +/* PR target/99754 */
> +/* { dg-do run } */
> +/* { dg-options "-O2 -msse2" } */
> +/* { dg-require-effective-target sse2 } */
> +
> +#include "sse2-check.h"
> +#include <emmintrin.h>
> +
> +static void
> +sse2_test (void)
> +{
> +  union { unsigned char buf[32]; long long ll; } u;
> +  u.buf[1] = 0xbe;
> +  u.buf[2] = 0xba;
> +  u.buf[3] = 0xfe;
> +  u.buf[4] = 0xca;
> +  u.buf[17] = 0xaa;
> +  u.buf[18] = 0x55;
> +  u.buf[19] = 0xaa;
> +  u.buf[20] = 0x55;
> +  _mm_storeu_si32 (&u.buf[17], _mm_loadu_si32 (&u.buf[1]));
> +  if (u.buf[17] != 0xbe || u.buf[18] != 0xba || u.buf[19] != 0xfe || u.buf[20] != 0xca)
> +    abort ();
> +}
>
>         Jakub
>
Jakub Jelinek March 14, 2022, 11:25 a.m. UTC | #2
On Sun, Mar 13, 2022 at 09:34:10PM +0800, Hongtao Liu wrote:
> LGTM, thanks for handling this.

Thanks, committed.

> > Note, while the Intrinsics guide says SSE2 for _mm_loadu_si32, it
> > strangely says SSE for _mm_loadu_si16.  But the intrinsic returns
> > __m128i, which is only defined in emmintrin.h, and _mm_set_epi16 is
> > also SSE2 and later, in emmintrin.h.  Even clang defines it in
> > emmintrin.h and ends up with an inlining failure when calling
> > _mm_loadu_si16 from an sse,no-sse2 function.
> > So, isn't that a bug in the intrinsics guide instead?
> I think it's a bug, it's supposed to generate movzx + movd, and movd
> is under sse2.  I have reported it to the colleague who maintains the
> Intel intrinsics guide.
> 
> A similar bug exists for
> _mm_loadu_si64
> _mm_storeu_si16
> _mm_storeu_si64

Currently it emits pxor + pinsrw, but even those are SSE2 instructions,
unless they use an MMX register (then it is MMX and SSE).
I agree that movzwl + movd seems better than pxor + pinsrw though.
So, do we want to help it a little bit then?  Like:

2022-03-14  Jakub Jelinek  <jakub@redhat.com>

	* config/i386/emmintrin.h (_mm_loadu_si16): Use _mm_set_epi32 instead
	of _mm_set_epi16 and zero extend the memory load.

	* gcc.target/i386/pr95483-1.c: Use -msse2 instead of -msse in
	dg-options, allow movzwl+movd instead of pxor with pinsrw.

--- gcc/config/i386/emmintrin.h.jj	2022-03-14 10:44:29.402617685 +0100
+++ gcc/config/i386/emmintrin.h	2022-03-14 11:58:18.062666257 +0100
@@ -724,7 +724,7 @@ _mm_loadu_si32 (void const *__P)
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_si16 (void const *__P)
 {
-  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]);
+  return _mm_set_epi32 (0, 0, 0, (unsigned short) ((*(__m16_u *)__P)[0]));
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
--- gcc/testsuite/gcc.target/i386/pr95483-1.c.jj	2020-10-14 22:05:19.380856952 +0200
+++ gcc/testsuite/gcc.target/i386/pr95483-1.c	2022-03-14 12:11:07.716891710 +0100
@@ -1,7 +1,7 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -msse" } */
-/* { dg-final { scan-assembler-times "pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "pinsrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-options "-O2 -msse2" } */
+/* { dg-final { scan-assembler-times "(?:movzwl\[ \\t\]+\[^\n\]*|pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+)(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "(?:movd|pinsrw)\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
 /* { dg-final { scan-assembler-times "pextrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*(?:\n|\[ \\t\]+#)" 1 } } */
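
(A rough before/after sketch of why the zero extension helps; plain
short loads stand in for the header's __m16_u here, and the function
names are made up.  With GCC at -O2, something like:)

#include <emmintrin.h>

/* Built as a vector of eight 16-bit elements, this typically becomes
   pxor + pinsrw.  */
__m128i
via_epi16 (short const *p)
{
  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, *p);
}

/* Zero-extended to 32 bits first, it can be a scalar load instead:
   movzwl + movd.  */
__m128i
via_epi32 (short const *p)
{
  return _mm_set_epi32 (0, 0, 0, (unsigned short) *p);
}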
 
 


	Jakub
Hongtao Liu March 14, 2022, 12:20 p.m. UTC | #3
On Mon, Mar 14, 2022 at 7:25 PM Jakub Jelinek <jakub@redhat.com> wrote:
>
> On Sun, Mar 13, 2022 at 09:34:10PM +0800, Hongtao Liu wrote:
> > LGTM, thanks for handling this.
>
> Thanks, committed.
>
> > > Note, while the Intrinsics guide says SSE2 for _mm_loadu_si32, it
> > > strangely says SSE for _mm_loadu_si16.  But the intrinsic returns
> > > __m128i, which is only defined in emmintrin.h, and _mm_set_epi16 is
> > > also SSE2 and later, in emmintrin.h.  Even clang defines it in
> > > emmintrin.h and ends up with an inlining failure when calling
> > > _mm_loadu_si16 from an sse,no-sse2 function.
> > > So, isn't that a bug in the intrinsics guide instead?
> > I think it's a bug, it's supposed to generate movzx + movd, and movd
> > is under sse2.  I have reported it to the colleague who maintains the
> > Intel intrinsics guide.
> >
> > A similar bug exists for
> > _mm_loadu_si64
> > _mm_storeu_si16
> > _mm_storeu_si64
>
> Currently it emits pxor + pinsrw, but even those are SSE2 instructions,
> unless they use an MMX register (then it is MMX and SSE).
> I agree that movzwl + movd seems better than pxor + pinsrw though.
> So, do we want to help it a little bit then?  Like:
>
> 2022-03-14  Jakub Jelinek  <jakub@redhat.com>
>
>         * config/i386/emmintrin.h (_mm_loadu_si16): Use _mm_set_epi32 instead
>         of _mm_set_epi16 and zero extend the memory load.
>
>         * gcc.target/i386/pr95483-1.c: Use -msse2 instead of -msse in
>         dg-options, allow movzwl+movd instead of pxor with pinsrw.
>
> --- gcc/config/i386/emmintrin.h.jj      2022-03-14 10:44:29.402617685 +0100
> +++ gcc/config/i386/emmintrin.h 2022-03-14 11:58:18.062666257 +0100
> @@ -724,7 +724,7 @@ _mm_loadu_si32 (void const *__P)
>  extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>  _mm_loadu_si16 (void const *__P)
>  {
> -  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]);
> +  return _mm_set_epi32 (0, 0, 0, (unsigned short) ((*(__m16_u *)__P)[0]));
>  }
Under avx512fp16, the former directly generates vmovw, but the latter
still generates movzx + vmovd, so there is still a missed optimization.
Thus I'd prefer to optimize it in the backend: pxor + pinsrw -> movzx +
movd -> vmovw (under avx512fp16).
I'll open a PR for that and optimize it in GCC 13.
>
>  extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
> --- gcc/testsuite/gcc.target/i386/pr95483-1.c.jj        2020-10-14 22:05:19.380856952 +0200
> +++ gcc/testsuite/gcc.target/i386/pr95483-1.c   2022-03-14 12:11:07.716891710 +0100
> @@ -1,7 +1,7 @@
>  /* { dg-do compile } */
> -/* { dg-options "-O2 -msse" } */
> -/* { dg-final { scan-assembler-times "pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
> -/* { dg-final { scan-assembler-times "pinsrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
> +/* { dg-options "-O2 -msse2" } */
> +/* { dg-final { scan-assembler-times "(?:movzwl\[ \\t\]+\[^\n\]*|pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+)(?:\n|\[ \\t\]+#)" 1 } } */
> +/* { dg-final { scan-assembler-times "(?:movd|pinsrw)\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
>  /* { dg-final { scan-assembler-times "pextrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*(?:\n|\[ \\t\]+#)" 1 } } */
>
>
>
>
>         Jakub
>
Hongtao Liu March 14, 2022, 12:24 p.m. UTC | #4
On Mon, Mar 14, 2022 at 8:20 PM Hongtao Liu <crazylht@gmail.com> wrote:
>
> On Mon, Mar 14, 2022 at 7:25 PM Jakub Jelinek <jakub@redhat.com> wrote:
> >
> > On Sun, Mar 13, 2022 at 09:34:10PM +0800, Hongtao Liu wrote:
> > > LGTM, thanks for handling this.
> >
> > Thanks, committed.
> >
> > > > Note, while the Intrinsics guide says SSE2 for _mm_loadu_si32, it
> > > > strangely says SSE for _mm_loadu_si16.  But the intrinsic returns
> > > > __m128i, which is only defined in emmintrin.h, and _mm_set_epi16 is
> > > > also SSE2 and later, in emmintrin.h.  Even clang defines it in
> > > > emmintrin.h and ends up with an inlining failure when calling
> > > > _mm_loadu_si16 from an sse,no-sse2 function.
> > > > So, isn't that a bug in the intrinsics guide instead?
> > > I think it's a bug, it's supposed to generate movzx + movd, and movd
> > > is under sse2.  I have reported it to the colleague who maintains the
> > > Intel intrinsics guide.
> > >
> > > A similar bug exists for
> > > _mm_loadu_si64
> > > _mm_storeu_si16
> > > _mm_storeu_si64
> >
> > Currently it emits pxor + pinsrw, but even those are SSE2 instructions,
> > unless they use an MMX register (then it is MMX and SSE).
> > I agree that movzwl + movd seems better than pxor + pinsrw though.
> > So, do we want to help it a little bit then?  Like:
> >
> > 2022-03-14  Jakub Jelinek  <jakub@redhat.com>
> >
> >         * config/i386/emmintrin.h (_mm_loadu_si16): Use _mm_set_epi32 instead
> >         of _mm_set_epi16 and zero extend the memory load.
> >
> >         * gcc.target/i386/pr95483-1.c: Use -msse2 instead of -msse in
> >         dg-options, allow movzwl+movd instead of pxor with pinsrw.
> >
> > --- gcc/config/i386/emmintrin.h.jj      2022-03-14 10:44:29.402617685 +0100
> > +++ gcc/config/i386/emmintrin.h 2022-03-14 11:58:18.062666257 +0100
> > @@ -724,7 +724,7 @@ _mm_loadu_si32 (void const *__P)
> >  extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
> >  _mm_loadu_si16 (void const *__P)
> >  {
> > -  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]);
> > +  return _mm_set_epi32 (0, 0, 0, (unsigned short) ((*(__m16_u *)__P)[0]));
> >  }
> Under avx512fp16, the former directly generates vmovw, but the latter
> still generates movzx + vmovd, so there is still a missed optimization.
> Thus I'd prefer to optimize it in the backend: pxor + pinsrw -> movzx +
> movd -> vmovw (under avx512fp16).
> I'll open a PR for that and optimize it in GCC 13.
PR104915.
> >
> >  extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
> > --- gcc/testsuite/gcc.target/i386/pr95483-1.c.jj        2020-10-14 22:05:19.380856952 +0200
> > +++ gcc/testsuite/gcc.target/i386/pr95483-1.c   2022-03-14 12:11:07.716891710 +0100
> > @@ -1,7 +1,7 @@
> >  /* { dg-do compile } */
> > -/* { dg-options "-O2 -msse" } */
> > -/* { dg-final { scan-assembler-times "pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
> > -/* { dg-final { scan-assembler-times "pinsrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
> > +/* { dg-options "-O2 -msse2" } */
> > +/* { dg-final { scan-assembler-times "(?:movzwl\[ \\t\]+\[^\n\]*|pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+)(?:\n|\[ \\t\]+#)" 1 } } */
> > +/* { dg-final { scan-assembler-times "(?:movd|pinsrw)\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
> >  /* { dg-final { scan-assembler-times "pextrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*(?:\n|\[ \\t\]+#)" 1 } } */
> >
> >
> >
> >
> >         Jakub
> >
>
>
> --
> BR,
> Hongtao

Patch

--- gcc/config/i386/emmintrin.h.jj	2022-01-11 23:11:21.766298923 +0100
+++ gcc/config/i386/emmintrin.h	2022-03-11 16:47:24.789884825 +0100
@@ -718,14 +718,13 @@  _mm_loadu_si64 (void const *__P)
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_si32 (void const *__P)
 {
-  return _mm_set_epi32 (*(int *)__P, (int)0, (int)0, (int)0);
+  return _mm_set_epi32 (0, 0, 0, (*(__m32_u *)__P)[0]);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_si16 (void const *__P)
 {
-  return _mm_set_epi16 (*(short *)__P, (short)0, (short)0, (short)0,
-			(short)0, (short)0, (short)0, (short)0);
+  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]);
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
--- gcc/testsuite/gcc.target/i386/pr99754-1.c.jj	2022-03-11 16:43:30.621120896 +0100
+++ gcc/testsuite/gcc.target/i386/pr99754-1.c	2022-03-11 16:43:18.250291856 +0100
@@ -0,0 +1,20 @@ 
+/* PR target/99754 */
+/* { dg-do run } */
+/* { dg-options "-O2 -msse2" } */
+/* { dg-require-effective-target sse2 } */
+
+#include "sse2-check.h"
+#include <emmintrin.h>
+
+static void
+sse2_test (void)
+{
+  union { unsigned char buf[32]; long long ll; } u;
+  u.buf[1] = 0xfe;
+  u.buf[2] = 0xca;
+  u.buf[17] = 0xaa;
+  u.buf[18] = 0x55;
+  _mm_storeu_si16 (&u.buf[17], _mm_loadu_si16 (&u.buf[1]));
+  if (u.buf[17] != 0xfe || u.buf[18] != 0xca)
+    abort ();
+}
--- gcc/testsuite/gcc.target/i386/pr99754-2.c.jj	2022-03-11 16:43:41.701967763 +0100
+++ gcc/testsuite/gcc.target/i386/pr99754-2.c	2022-03-11 16:45:16.391659211 +0100
@@ -0,0 +1,24 @@ 
+/* PR target/99754 */
+/* { dg-do run } */
+/* { dg-options "-O2 -msse2" } */
+/* { dg-require-effective-target sse2 } */
+
+#include "sse2-check.h"
+#include <emmintrin.h>
+
+static void
+sse2_test (void)
+{
+  union { unsigned char buf[32]; long long ll; } u;
+  u.buf[1] = 0xbe;
+  u.buf[2] = 0xba;
+  u.buf[3] = 0xfe;
+  u.buf[4] = 0xca;
+  u.buf[17] = 0xaa;
+  u.buf[18] = 0x55;
+  u.buf[19] = 0xaa;
+  u.buf[20] = 0x55;
+  _mm_storeu_si32 (&u.buf[17], _mm_loadu_si32 (&u.buf[1]));
+  if (u.buf[17] != 0xbe || u.buf[18] != 0xba || u.buf[19] != 0xfe || u.buf[20] != 0xca)
+    abort ();
+}