
i386: Emit 16b atomics inline with -m64 -mcx16 -mavx [PR104688]

Message ID Y3Hz0QswGCGclWGu@tucnak
State New
Series i386: Emit 16b atomics inline with -m64 -mcx16 -mavx [PR104688]

Commit Message

Jakub Jelinek Nov. 14, 2022, 7:52 a.m. UTC
Hi!

Working virtually out of Baker Island.

Given
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688#c10
the following patch implements atomic load/store (thereby also
enabling compare and exchange) for -m64 -mcx16 -mavx.
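
For illustration (a sketch only; exact register allocation and surrounding
code will differ), a seq_cst 16-byte __atomic_load_n/__atomic_store_n on an
aligned __int128 is then expected to expand to something along the lines of:

	vmovdqa	v(%rip), %xmm0		# 16-byte atomic load
	...
	vmovdqa	%xmm0, v(%rip)		# 16-byte atomic store
	mfence				# emitted only for seq_cst stores

instead of library calls to __atomic_load_16/__atomic_store_16.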

Ok for trunk if it passes bootstrap/regtest?

2022-11-13  Jakub Jelinek  <jakub@redhat.com>

	PR target/104688
	* config/i386/sync.md (atomic_loadti, atomic_storeti): New
	define_expand patterns.
	(atomic_loadti_1, atomic_storeti_1): New define_insn patterns.

	* gcc.target/i386/pr104688-1.c: New test.
	* gcc.target/i386/pr104688-2.c: New test.
	* gcc.target/i386/pr104688-3.c: New test.



	Jakub

Comments

Uros Bizjak Nov. 14, 2022, 7:57 a.m. UTC | #1
On Mon, Nov 14, 2022 at 8:52 AM Jakub Jelinek <jakub@redhat.com> wrote:
>
> Hi!
>
> Working virtually out of Baker Island.
>
> Given
> https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688#c10
> the following patch implements atomic load/store (thereby also
> enabling compare and exchange) for -m64 -mcx16 -mavx.
>
> Ok for trunk if it passes bootstrap/regtest?

We only have a guarantee from Intel and AMD; there can be other vendors.

Uros.

Hongtao Liu Nov. 14, 2022, 9:04 a.m. UTC | #2
On Mon, Nov 14, 2022 at 3:57 PM Uros Bizjak via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> On Mon, Nov 14, 2022 at 8:52 AM Jakub Jelinek <jakub@redhat.com> wrote:
> >
> > Hi!
> >
> > Working virtually out of Baker Island.
> >
> > Given
> > https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688#c10
> > the following patch implements atomic load/store (thereby also
> > enabling compare and exchange) for -m64 -mcx16 -mavx.
> >
> > Ok for trunk if it passes bootstrap/regtest?
>
> We only have a guarantee from Intel and AMD; there can be other vendors.
Can we make it a micro-architecture tuning?
Hongtao Liu Nov. 14, 2022, 9:08 a.m. UTC | #3
On Mon, Nov 14, 2022 at 5:04 PM Hongtao Liu <crazylht@gmail.com> wrote:
>
> On Mon, Nov 14, 2022 at 3:57 PM Uros Bizjak via Gcc-patches
> <gcc-patches@gcc.gnu.org> wrote:
> >
> > On Mon, Nov 14, 2022 at 8:52 AM Jakub Jelinek <jakub@redhat.com> wrote:
> > >
> > > Hi!
> > >
> > > Working virtually out of Baker Island.
> > >
> > > Given
> > > https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688#c10
> > > the following patch implements atomic load/store (thereby also
> > > enabling compare and exchange) for -m64 -mcx16 -mavx.
> > >
> > > Ok for trunk if it passes bootstrap/regtest?
> >
> > We only have a guarantee from Intel and AMD; there can be other vendors.
> Can we make it a micro-architecture tuning?
Or is this the kind of thing that might cause correctness problems (for
example, if other vendors' processors are run with -mtune=intel/amd),
and is therefore not suitable as a micro-architecture tuning?
Jakub Jelinek Nov. 14, 2022, 9:13 a.m. UTC | #4
On Mon, Nov 14, 2022 at 05:04:24PM +0800, Hongtao Liu wrote:
> On Mon, Nov 14, 2022 at 3:57 PM Uros Bizjak via Gcc-patches
> <gcc-patches@gcc.gnu.org> wrote:
> >
> > On Mon, Nov 14, 2022 at 8:52 AM Jakub Jelinek <jakub@redhat.com> wrote:
> > >
> > > Hi!
> > >
> > > Working virtually out of Baker Island.
> > >
> > > Given
> > > https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688#c10
> > > the following patch implements atomic load/store (thereby also
> > > enabling compare and exchange) for -m64 -mcx16 -mavx.
> > >
> > > Ok for trunk if it passes bootstrap/regtest?
> >
> > We only have a guarantee from Intel and AMD; there can be other vendors.
> Can we make it a micro-architecture tuning?

No, -mtune= isn't a guarantee that the code will be executed only on certain
CPUs (-march= is); -mtune= is only about optimizing code for a certain CPU.
If we don't get a guarantee from the remaining makers of CPUs with the AVX +
CX16 ISAs, another option would be to add a new -mvmovdqa-atomic option,
turn it on for -march= of Intel and AMD CPUs with AVX + CX16, and use
TARGET_64BIT && TARGET_CMPXCHG16B && TARGET_AVX && TARGET_VMOVDQA_ATOMIC
as the condition in the patch.
But that would use it only for -march=native or when people pass -march= for
a particular Intel or AMD CPU, while if we get a guarantee from all AVX+CX16
CPU makers, then it can be on by default with just -mcx16 -mavx.
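
A minimal sketch of that alternative (purely illustrative, not part of this
patch; the exact .opt syntax would need checking) could be an i386.opt entry
along the lines of:

  mvmovdqa-atomic
  Target Var(ix86_vmovdqa_atomic) Init(0) Save
  Assume 16-byte aligned vmovdqa loads and stores are atomic.

with TARGET_VMOVDQA_ATOMIC defined to ix86_vmovdqa_atomic and added to the
conditions of the new patterns.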

	Jakub

Patch

--- gcc/config/i386/sync.md.jj	2022-11-07 20:54:37.259400942 -1200
+++ gcc/config/i386/sync.md	2022-11-13 19:27:22.977987355 -1200
@@ -225,6 +225,31 @@  (define_insn_and_split "atomic_loaddi_fp
   DONE;
 })
 
+;; Intel SDM guarantees that 16-byte VMOVDQA on 16-byte aligned address
+;; is atomic.  AMD will give a similar guarantee.
+(define_expand "atomic_loadti"
+  [(set (match_operand:TI 0 "register_operand" "=x,Yv")
+	(unspec:TI [(match_operand:TI 1 "memory_operand" "m,m")
+		    (match_operand:SI 2 "const_int_operand")]
+		   UNSPEC_LDA))]
+  "TARGET_64BIT && TARGET_CMPXCHG16B && TARGET_AVX"
+{
+  emit_insn (gen_atomic_loadti_1 (operands[0], operands[1]));
+  DONE;
+})
+
+(define_insn "atomic_loadti_1"
+  [(set (match_operand:TI 0 "register_operand" "=x,Yv")
+	(unspec:TI [(match_operand:TI 1 "memory_operand" "m,m")]
+		   UNSPEC_LDA))]
+  "TARGET_64BIT && TARGET_CMPXCHG16B && TARGET_AVX"
+  "@
+   vmovdqa\t{%1, %0|%0, %1}
+   vmovdqa64\t{%1, %0|%0, %1}"
+  [(set_attr "type" "ssemov")
+   (set_attr "prefix" "vex,evex")
+   (set_attr "mode" "TI")])
+
 (define_expand "atomic_store<mode>"
   [(set (match_operand:ATOMIC 0 "memory_operand")
 	(unspec:ATOMIC [(match_operand:ATOMIC 1 "nonimmediate_operand")
@@ -276,6 +301,36 @@  (define_insn "atomic_store<mode>_1"
   ""
   "%K2mov{<imodesuffix>}\t{%1, %0|%0, %1}")
 
+(define_expand "atomic_storeti"
+  [(set (match_operand:TI 0 "memory_operand")
+	(unspec:TI [(match_operand:TI 1 "register_operand")
+		    (match_operand:SI 2 "const_int_operand")]
+		   UNSPEC_STA))]
+  "TARGET_64BIT && TARGET_CMPXCHG16B && TARGET_AVX"
+{
+  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+
+  emit_insn (gen_atomic_storeti_1 (operands[0], operands[1], operands[2]));
+
+  /* ... followed by an MFENCE, if required.  */
+  if (is_mm_seq_cst (model))
+    emit_insn (gen_mem_thread_fence (operands[2]));
+  DONE;
+})
+
+(define_insn "atomic_storeti_1"
+  [(set (match_operand:TI 0 "memory_operand" "=m,m")
+	(unspec:TI [(match_operand:TI 1 "register_operand" "x,Yv")
+		     (match_operand:SI 2 "const_int_operand")]
+		    UNSPEC_STA))]
+  ""
+  "@
+   %K2vmovdqa\t{%1, %0|%0, %1}
+   %K2vmovdqa64\t{%1, %0|%0, %1}"
+  [(set_attr "type" "ssemov")
+   (set_attr "prefix" "vex,evex")
+   (set_attr "mode" "TI")])
+
 (define_insn_and_split "atomic_storedi_fpu"
   [(set (match_operand:DI 0 "memory_operand" "=m,m,m")
 	(unspec:DI [(match_operand:DI 1 "nonimmediate_operand" "x,m,?r")]
--- gcc/testsuite/gcc.target/i386/pr104688-1.c.jj	2022-11-13 19:36:43.251332612 -1200
+++ gcc/testsuite/gcc.target/i386/pr104688-1.c	2022-11-13 19:40:22.649334650 -1200
@@ -0,0 +1,34 @@ 
+/* PR target/104688 */
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2 -mno-cx16" } */
+/* { dg-final { scan-assembler "\t__sync_val_compare_and_swap_16" } } */
+/* { dg-final { scan-assembler "\t__atomic_load_16" } } */
+/* { dg-final { scan-assembler "\t__atomic_store_16" } } */
+/* { dg-final { scan-assembler "\t__atomic_compare_exchange_16" } } */
+
+__int128 v;
+
+__int128
+f1 (void)
+{
+  return __sync_val_compare_and_swap (&v, 42, 0);
+}
+
+__int128
+f2 (void)
+{
+  return __atomic_load_n (&v, __ATOMIC_SEQ_CST);
+}
+
+void
+f3 (__int128 x)
+{
+  __atomic_store_n (&v, 42, __ATOMIC_SEQ_CST);
+}
+
+__int128
+f4 (void)
+{
+  __int128 y = 42;
+  __atomic_compare_exchange_n (&v, &y, 0, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
--- gcc/testsuite/gcc.target/i386/pr104688-2.c.jj	2022-11-13 19:36:46.513288025 -1200
+++ gcc/testsuite/gcc.target/i386/pr104688-2.c	2022-11-13 19:40:34.676170305 -1200
@@ -0,0 +1,34 @@ 
+/* PR target/104688 */
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2 -mno-avx" } */
+/* { dg-final { scan-assembler "\t__sync_val_compare_and_swap_16" } } */
+/* { dg-final { scan-assembler "\t__atomic_load_16" } } */
+/* { dg-final { scan-assembler "\t__atomic_store_16" } } */
+/* { dg-final { scan-assembler "\t__atomic_compare_exchange_16" } } */
+
+__int128 v;
+
+__int128
+f1 (void)
+{
+  return __sync_val_compare_and_swap (&v, 42, 0);
+}
+
+__int128
+f2 (void)
+{
+  return __atomic_load_n (&v, __ATOMIC_SEQ_CST);
+}
+
+void
+f3 (__int128 x)
+{
+  __atomic_store_n (&v, 42, __ATOMIC_SEQ_CST);
+}
+
+__int128
+f4 (void)
+{
+  __int128 y = 42;
+  __atomic_compare_exchange_n (&v, &y, 0, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
--- gcc/testsuite/gcc.target/i386/pr104688-3.c.jj	2022-11-13 19:37:00.899091450 -1200
+++ gcc/testsuite/gcc.target/i386/pr104688-3.c	2022-11-13 19:40:41.984070460 -1200
@@ -0,0 +1,34 @@ 
+/* PR target/104688 */
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2 -mcx16 -mavx" } */
+/* { dg-final { scan-assembler-not "\t__sync_val_compare_and_swap_16" } } */
+/* { dg-final { scan-assembler-not "\t__atomic_load_16" } } */
+/* { dg-final { scan-assembler-not "\t__atomic_store_16" } } */
+/* { dg-final { scan-assembler-not "\t__atomic_compare_exchange_16" } } */
+
+__int128 v;
+
+__int128
+f1 (void)
+{
+  return __sync_val_compare_and_swap (&v, 42, 0);
+}
+
+__int128
+f2 (void)
+{
+  return __atomic_load_n (&v, __ATOMIC_SEQ_CST);
+}
+
+void
+f3 (__int128 x)
+{
+  __atomic_store_n (&v, 42, __ATOMIC_SEQ_CST);
+}
+
+__int128
+f4 (void)
+{
+  __int128 y = 42;
+  __atomic_compare_exchange_n (&v, &y, 0, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+}
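
As a quick sanity check of the new code generation (not part of the patch;
assuming a compiler built with it), something like

  $ gcc -O2 -m64 -mcx16 -mavx -S -o - pr104688-3.c | grep -E 'vmovdqa|mfence|atomic'

should show the vmovdqa loads/stores (plus an mfence after the seq_cst store)
and no calls to the __atomic_*_16 libatomic functions, matching the
scan-assembler-not directives above.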