
bitint: Handle BIT_FIELD_REF lowering [PR114157]

Message ID ZebEn65xxr8S8Jqj@tucnak
State New
Series bitint: Handle BIT_FIELD_REF lowering [PR114157]

Commit Message

Jakub Jelinek March 5, 2024, 7:07 a.m. UTC
Hi!

The following patch adds support for lowering BIT_FIELD_REF with a
large/huge _BitInt lhs.  BIT_FIELD_REF requires its first operand to
have a mode, so that operand can't itself be a huge _BitInt.
If we only access limbs from inside of the BIT_FIELD_REF using constant
indexes, we can just create a new BIT_FIELD_REF to extract each limb,
but if we need to use a variable index in a loop, I'm afraid we need
to spill the operand into memory, which is what the following patch does.
If there is some bitwise type for the extraction, it extracts just
what we need and not more than that; otherwise it spills the whole
first argument of the BIT_FIELD_REF and uses a MEM_REF with an offset,
with a VIEW_CONVERT_EXPR around it.
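
To make the two strategies concrete, here is a rough C analogy (this is
not the GIMPLE the pass emits, and the type and function names below are
only illustrative): with a constant limb index the limb can be read
straight out of the BIT_FIELD_REF operand, while with a variable index
the operand is first spilled into addressable memory and then indexed
as a limb array.

/* Rough C analogy of the two lowering strategies; limb_t and v8di are
   illustrative stand-ins for the limb type and the vector operand type.  */
typedef unsigned long long limb_t;
typedef long v8di __attribute__ ((vector_size (64)));

/* Constant index: extract one limb directly from the operand, much like
   the new BIT_FIELD_REF the pass builds (idx would be a compile-time
   constant there).  */
static limb_t
extract_limb_const (v8di v, unsigned idx)
{
  limb_t r;
  __builtin_memcpy (&r, (const char *) &v + idx * sizeof (limb_t),
		    sizeof (r));
  return r;
}

/* Variable index: spill the whole operand into addressable memory once
   and index the resulting limb array inside the lowering loop.  */
static limb_t
extract_limb_var (v8di v, unsigned idx)
{
  limb_t spill[sizeof (v) / sizeof (limb_t)];
  __builtin_memcpy (spill, &v, sizeof (spill));
  return spill[idx];
}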

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

2024-03-05  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/114157
	* gimple-lower-bitint.cc: Include stor-layout.h.
	(mergeable_op): Return true for BIT_FIELD_REF.
	(struct bitint_large_huge): Declare handle_bit_field_ref method.
	(bitint_large_huge::handle_bit_field_ref): New method.
	(bitint_large_huge::handle_stmt): Use it for BIT_FIELD_REF.

	* gcc.dg/bitint-98.c: New test.
	* gcc.target/i386/avx2-pr114157.c: New test.
	* gcc.target/i386/avx512f-pr114157.c: New test.


	Jakub

Comments

Richard Biener March 5, 2024, 8:27 a.m. UTC | #1
On Tue, 5 Mar 2024, Jakub Jelinek wrote:

> Hi!
> 
> The following patch adds support for lowering BIT_FIELD_REF with a
> large/huge _BitInt lhs.  BIT_FIELD_REF requires its first operand to
> have a mode, so that operand can't itself be a huge _BitInt.
> If we only access limbs from inside of the BIT_FIELD_REF using constant
> indexes, we can just create a new BIT_FIELD_REF to extract each limb,
> but if we need to use a variable index in a loop, I'm afraid we need
> to spill the operand into memory, which is what the following patch does.

:/

If it's only ever "small" _BitInt and we'd want to optimize we could
fully unroll the loop at code generation time and thus avoid the
variable indices?  You could also lower the BIT_FIELD_REF to
variable shifts & masking I suppose.

Not sure if it's worth the trouble though.
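
For what the shift-and-mask variant could look like, a minimal sketch,
assuming the operand is small enough to have an ordinary integer type
(unsigned __int128 on a 64-bit target here); for genuinely huge operands
no such type exists, which is the limitation discussed further down.
The names are illustrative only.

/* Extract limb IDX by shifting it down to bit 0; the truncation to the
   64-bit return type performs the masking.  Only works while the whole
   operand fits in an integer type.  */
static unsigned long long
extract_limb_by_shift (unsigned __int128 x, unsigned idx)
{
  return (unsigned long long) (x >> (idx * 64));
}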

> If there is some bitwise type for the extraction, it extracts just
> what we need and not more than that; otherwise it spills the whole
> first argument of the BIT_FIELD_REF and uses a MEM_REF with an offset,
> with a VIEW_CONVERT_EXPR around it.
> 
> Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

OK.

Thanks,
Richard.

> 2024-03-05  Jakub Jelinek  <jakub@redhat.com>
> 
> 	PR middle-end/114157
> 	* gimple-lower-bitint.cc: Include stor-layout.h.
> 	(mergeable_op): Return true for BIT_FIELD_REF.
> 	(struct bitint_large_huge): Declare handle_bit_field_ref method.
> 	(bitint_large_huge::handle_bit_field_ref): New method.
> 	(bitint_large_huge::handle_stmt): Use it for BIT_FIELD_REF.
> 
> 	* gcc.dg/bitint-98.c: New test.
> 	* gcc.target/i386/avx2-pr114157.c: New test.
> 	* gcc.target/i386/avx512f-pr114157.c: New test.
> 
> --- gcc/gimple-lower-bitint.cc.jj	2024-03-04 11:14:57.450288563 +0100
> +++ gcc/gimple-lower-bitint.cc	2024-03-04 18:51:06.833008534 +0100
> @@ -54,6 +54,7 @@ along with GCC; see the file COPYING3.
>  #include "tree-cfgcleanup.h"
>  #include "tree-switch-conversion.h"
>  #include "ubsan.h"
> +#include "stor-layout.h"
>  #include "gimple-lower-bitint.h"
>  
>  /* Split BITINT_TYPE precisions in 4 categories.  Small _BitInt, where
> @@ -212,6 +213,7 @@ mergeable_op (gimple *stmt)
>      case BIT_NOT_EXPR:
>      case SSA_NAME:
>      case INTEGER_CST:
> +    case BIT_FIELD_REF:
>        return true;
>      case LSHIFT_EXPR:
>        {
> @@ -435,6 +437,7 @@ struct bitint_large_huge
>    tree handle_plus_minus (tree_code, tree, tree, tree);
>    tree handle_lshift (tree, tree, tree);
>    tree handle_cast (tree, tree, tree);
> +  tree handle_bit_field_ref (tree, tree);
>    tree handle_load (gimple *, tree);
>    tree handle_stmt (gimple *, tree);
>    tree handle_operand_addr (tree, gimple *, int *, int *);
> @@ -1685,6 +1688,86 @@ bitint_large_huge::handle_cast (tree lhs
>    return NULL_TREE;
>  }
>  
> +/* Helper function for handle_stmt method, handle a BIT_FIELD_REF.  */
> +
> +tree
> +bitint_large_huge::handle_bit_field_ref (tree op, tree idx)
> +{
> +  if (tree_fits_uhwi_p (idx))
> +    {
> +      if (m_first)
> +	m_data.safe_push (NULL);
> +      ++m_data_cnt;
> +      unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE (m_limb_type));
> +      tree bfr = build3 (BIT_FIELD_REF, m_limb_type,
> +			 TREE_OPERAND (op, 0),
> +			 TYPE_SIZE (m_limb_type),
> +			 size_binop (PLUS_EXPR, TREE_OPERAND (op, 2),
> +				     bitsize_int (tree_to_uhwi (idx) * sz)));
> +      tree r = make_ssa_name (m_limb_type);
> +      gimple *g = gimple_build_assign (r, bfr);
> +      insert_before (g);
> +      tree type = limb_access_type (TREE_TYPE (op), idx);
> +      if (!useless_type_conversion_p (type, m_limb_type))
> +	r = add_cast (type, r);
> +      return r;
> +    }
> +  tree var;
> +  if (m_first)
> +    {
> +      unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (op)));
> +      machine_mode mode;
> +      tree type, bfr;
> +      if (bitwise_mode_for_size (sz).exists (&mode)
> +	  && known_eq (GET_MODE_BITSIZE (mode), sz))
> +	type = bitwise_type_for_mode (mode);
> +      else
> +	{
> +	  mode = VOIDmode;
> +	  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (op, 0)));
> +	}
> +      if (TYPE_ALIGN (type) < TYPE_ALIGN (TREE_TYPE (op)))
> +	type = build_aligned_type (type, TYPE_ALIGN (TREE_TYPE (op)));
> +      var = create_tmp_var (type);
> +      TREE_ADDRESSABLE (var) = 1;
> +      gimple *g;
> +      if (mode != VOIDmode)
> +	{
> +	  bfr = build3 (BIT_FIELD_REF, type, TREE_OPERAND (op, 0),
> +			TYPE_SIZE (type), TREE_OPERAND (op, 2));
> +	  g = gimple_build_assign (make_ssa_name (type),
> +				   BIT_FIELD_REF, bfr);
> +	  gimple_set_location (g, m_loc);
> +	  gsi_insert_after (&m_init_gsi, g, GSI_NEW_STMT);
> +	  bfr = gimple_assign_lhs (g);
> +	}
> +      else
> +	bfr = TREE_OPERAND (op, 0);
> +      g = gimple_build_assign (var, bfr);
> +      gimple_set_location (g, m_loc);
> +      gsi_insert_after (&m_init_gsi, g, GSI_NEW_STMT);
> +      if (mode == VOIDmode)
> +	{
> +	  unsigned HOST_WIDE_INT nelts
> +	    = CEIL (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (op))), limb_prec);
> +	  tree atype = build_array_type_nelts (m_limb_type, nelts);
> +	  var = build2 (MEM_REF, atype, build_fold_addr_expr (var),
> +			build_int_cst (build_pointer_type (type),
> +				       tree_to_uhwi (TREE_OPERAND (op, 2))
> +				       / BITS_PER_UNIT));
> +	}
> +      m_data.safe_push (var);
> +    }
> +  else
> +    var = unshare_expr (m_data[m_data_cnt]);
> +  ++m_data_cnt;
> +  var = limb_access (TREE_TYPE (op), var, idx, false);
> +  tree r = make_ssa_name (m_limb_type);
> +  gimple *g = gimple_build_assign (r, var);
> +  insert_before (g);
> +  return r;
> +}
> +
>  /* Add a new EH edge from SRC to EH_EDGE->dest, where EH_EDGE
>     is an older EH edge, and except for virtual PHIs duplicate the
>     PHI argument from the EH_EDGE to the new EH edge.  */
> @@ -2019,6 +2102,8 @@ bitint_large_huge::handle_stmt (gimple *
>  	  return handle_cast (TREE_TYPE (gimple_assign_lhs (stmt)),
>  			      TREE_OPERAND (gimple_assign_rhs1 (stmt), 0),
>  			      idx);
> +	case BIT_FIELD_REF:
> +	  return handle_bit_field_ref (gimple_assign_rhs1 (stmt), idx);
>  	default:
>  	  break;
>  	}
> --- gcc/testsuite/gcc.dg/bitint-98.c.jj	2024-03-04 19:11:46.355244244 +0100
> +++ gcc/testsuite/gcc.dg/bitint-98.c	2024-03-04 19:10:35.139207685 +0100
> @@ -0,0 +1,50 @@
> +/* PR middle-end/114157 */
> +/* { dg-do compile { target bitint } } */
> +/* { dg-options "-O2 -std=c23 -Wno-psabi -w" } */
> +
> +#if __BITINT_MAXWIDTH__ >= 256
> +_BitInt(256) d;
> +_BitInt(255) e;
> +
> +void
> +foo (long __attribute__((vector_size (64))) s)
> +{
> +  __builtin_memmove (&d, &s, sizeof (d));
> +}
> +
> +void
> +bar (_BitInt(512) x)
> +{
> +  long __attribute__((vector_size (64))) s;
> +  __builtin_memcpy (&s, &x, sizeof (s));
> +  __builtin_memcpy (&d, &s, sizeof (d));
> +}
> +
> +void
> +baz (long __attribute__((vector_size (64))) s)
> +{
> +  _BitInt(256) d;
> +  __builtin_memmove (&d, &s, sizeof (d));
> +  e = d;
> +}
> +
> +void
> +qux (long __attribute__((vector_size (64))) s)
> +{
> +  _BitInt(192) d;
> +  __builtin_memmove (&d, &s, sizeof (d));
> +  e = d;
> +}
> +#else
> +int i;
> +#endif
> +
> +#if __BITINT_MAXWIDTH__ >= 1024
> +_BitInt(512)
> +corge (long __attribute__((vector_size (1024))) s)
> +{
> +  _BitInt(512) d;
> +  __builtin_memcpy (&d, &s, sizeof (d));
> +  return d;
> +}
> +#endif
> --- gcc/testsuite/gcc.target/i386/avx2-pr114157.c.jj	2024-03-04 19:12:46.001437331 +0100
> +++ gcc/testsuite/gcc.target/i386/avx2-pr114157.c	2024-03-04 19:12:31.639631618 +0100
> @@ -0,0 +1,5 @@
> +/* PR middle-end/114157 */
> +/* { dg-do compile { target bitint } } */
> +/* { dg-options "-O2 -std=c23 -Wno-psabi -w -mavx2 -mno-avx512f" } */
> +
> +#include "../../gcc.dg/bitint-98.c"
> --- gcc/testsuite/gcc.target/i386/avx512f-pr114157.c.jj	2024-03-04 19:13:01.190231847 +0100
> +++ gcc/testsuite/gcc.target/i386/avx512f-pr114157.c	2024-03-04 19:13:12.018085362 +0100
> @@ -0,0 +1,5 @@
> +/* PR middle-end/114157 */
> +/* { dg-do compile { target bitint } } */
> +/* { dg-options "-O2 -std=c23 -Wno-psabi -w -mavx512f" } */
> +
> +#include "../../gcc.dg/bitint-98.c"
> 
> 	Jakub
> 
>
Jakub Jelinek March 5, 2024, 8:42 a.m. UTC | #2
On Tue, Mar 05, 2024 at 09:27:22AM +0100, Richard Biener wrote:
> On Tue, 5 Mar 2024, Jakub Jelinek wrote:
> > The following patch adds support for lowering BIT_FIELD_REF with a
> > large/huge _BitInt lhs.  BIT_FIELD_REF requires its first operand to
> > have a mode, so that operand can't itself be a huge _BitInt.
> > If we only access limbs from inside of the BIT_FIELD_REF using constant
> > indexes, we can just create a new BIT_FIELD_REF to extract each limb,
> > but if we need to use a variable index in a loop, I'm afraid we need
> > to spill the operand into memory, which is what the following patch does.
> 
> :/
> 
> If it's only ever "small" _BitInt and we'd want to optimize we could
> fully unroll the loop at code generation time and thus avoid the
> variable indices?  You could also lower the BIT_FIELD_REF to
> variable shifts & masking I suppose.

Not really sure if one can have some of the SVE/RISCV modes in there;
those couldn't be small anymore.  But otherwise yes, right now it is likely
at most 64-byte vectors, aka 512 bits.  Now, if it is say an extraction of
_BitInt(448) out of it (so that it isn't just a VCE instead), that would
still mean e.g. on ia32 unrolling the loop into 7 iterations handling
2 limbs each, i.e. 14 limbs.  That is already huge I'm afraid, especially
when the extraction can be hidden somewhere in the middle of a large
expression which is all mergeable.
But more importantly, currently there are simple rules: large _BitInt
implies straight-line code, huge _BitInt implies a loop, and the loop
handles just 2 limbs (for other operations just 1 limb) per iteration.
Changing that depending on what trees are used somewhere would be a
nightmare.  The idea was that if it is worth unrolling, the unroller can
unroll it later, and at that point I'd think e.g. FRE would optimize away
the temporary memory.
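
As a reference for the loop shape mentioned above, here is a rough C
sketch of what "huge _BitInt implies a loop handling 2 limbs per
iteration" means, with the spilled BIT_FIELD_REF operand simply indexed
as a limb array; the names are illustrative and this is not code from
the pass.

/* Sketch: process a huge _BitInt two limbs per loop iteration, reading
   the limbs out of the spilled operand with a variable index.  For
   simplicity this assumes NLIMBS is even.  */
typedef unsigned long long limb_t;

static void
process_huge (limb_t *dst, const limb_t *spilled_op, unsigned nlimbs)
{
  for (unsigned i = 0; i < nlimbs; i += 2)
    {
      dst[i] = spilled_op[i];		/* first limb of the iteration */
      dst[i + 1] = spilled_op[i + 1];	/* second limb of the iteration */
    }
}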

For variable shifts/masking I'd need some type in which I can do it.
Sure, perhaps if the inner operand is a vector I could use some non-constant
permutations or similar.

	Jakub
Richard Biener March 5, 2024, 8:53 a.m. UTC | #3
On Tue, 5 Mar 2024, Jakub Jelinek wrote:

> On Tue, Mar 05, 2024 at 09:27:22AM +0100, Richard Biener wrote:
> > On Tue, 5 Mar 2024, Jakub Jelinek wrote:
> > > The following patch adds support for lowering BIT_FIELD_REF with a
> > > large/huge _BitInt lhs.  BIT_FIELD_REF requires its first operand to
> > > have a mode, so that operand can't itself be a huge _BitInt.
> > > If we only access limbs from inside of the BIT_FIELD_REF using constant
> > > indexes, we can just create a new BIT_FIELD_REF to extract each limb,
> > > but if we need to use a variable index in a loop, I'm afraid we need
> > > to spill the operand into memory, which is what the following patch does.
> > 
> > :/
> > 
> > If it's only ever "small" _BitInt and we'd want to optimize we could
> > fully unroll the loop at code generation time and thus avoid the
> > variable indices?  You could also lower the BIT_FIELD_REF to
> > variable shifts & masking I suppose.
> 
> Not really sure if one can have some of the SVE/RISCV modes in there;
> those couldn't be small anymore.  But otherwise yes, right now it is likely
> at most 64-byte vectors, aka 512 bits.  Now, if it is say an extraction of
> _BitInt(448) out of it (so that it isn't just a VCE instead), that would
> still mean e.g. on ia32 unrolling the loop into 7 iterations handling
> 2 limbs each, i.e. 14 limbs.  That is already huge I'm afraid, especially
> when the extraction can be hidden somewhere in the middle of a large
> expression which is all mergeable.
> But more importantly, currently there are simple rules: large _BitInt
> implies straight-line code, huge _BitInt implies a loop, and the loop
> handles just 2 limbs (for other operations just 1 limb) per iteration.
> Changing that depending on what trees are used somewhere would be a
> nightmare.  The idea was that if it is worth unrolling, the unroller can
> unroll it later, and at that point I'd think e.g. FRE would optimize away
> the temporary memory.

Yeah, I would also guess FRE would optimize it, though the question is
whether the unroller heuristic anticipates that or whether the loop is
small enough.  I guess we can worry about it when it turns out to be a
problem.

> For variable shifts/masking I'd need some type in which I can do it.

Ah, sure ... OTOH somehow RTL expansion manages to do it ;)

> Sure, perhaps if the inner operand is a vector I could use some non-constant
> permutations or similar.

If the extraction is byte aligned, sure; and maybe if the extraction is
from a single limb it can be lowered without an extra temporary.

Richard.

Patch

--- gcc/gimple-lower-bitint.cc.jj	2024-03-04 11:14:57.450288563 +0100
+++ gcc/gimple-lower-bitint.cc	2024-03-04 18:51:06.833008534 +0100
@@ -54,6 +54,7 @@  along with GCC; see the file COPYING3.
 #include "tree-cfgcleanup.h"
 #include "tree-switch-conversion.h"
 #include "ubsan.h"
+#include "stor-layout.h"
 #include "gimple-lower-bitint.h"
 
 /* Split BITINT_TYPE precisions in 4 categories.  Small _BitInt, where
@@ -212,6 +213,7 @@  mergeable_op (gimple *stmt)
     case BIT_NOT_EXPR:
     case SSA_NAME:
     case INTEGER_CST:
+    case BIT_FIELD_REF:
       return true;
     case LSHIFT_EXPR:
       {
@@ -435,6 +437,7 @@  struct bitint_large_huge
   tree handle_plus_minus (tree_code, tree, tree, tree);
   tree handle_lshift (tree, tree, tree);
   tree handle_cast (tree, tree, tree);
+  tree handle_bit_field_ref (tree, tree);
   tree handle_load (gimple *, tree);
   tree handle_stmt (gimple *, tree);
   tree handle_operand_addr (tree, gimple *, int *, int *);
@@ -1685,6 +1688,86 @@  bitint_large_huge::handle_cast (tree lhs
   return NULL_TREE;
 }
 
+/* Helper function for handle_stmt method, handle a BIT_FIELD_REF.  */
+
+tree
+bitint_large_huge::handle_bit_field_ref (tree op, tree idx)
+{
+  if (tree_fits_uhwi_p (idx))
+    {
+      if (m_first)
+	m_data.safe_push (NULL);
+      ++m_data_cnt;
+      unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE (m_limb_type));
+      tree bfr = build3 (BIT_FIELD_REF, m_limb_type,
+			 TREE_OPERAND (op, 0),
+			 TYPE_SIZE (m_limb_type),
+			 size_binop (PLUS_EXPR, TREE_OPERAND (op, 2),
+				     bitsize_int (tree_to_uhwi (idx) * sz)));
+      tree r = make_ssa_name (m_limb_type);
+      gimple *g = gimple_build_assign (r, bfr);
+      insert_before (g);
+      tree type = limb_access_type (TREE_TYPE (op), idx);
+      if (!useless_type_conversion_p (type, m_limb_type))
+	r = add_cast (type, r);
+      return r;
+    }
+  tree var;
+  if (m_first)
+    {
+      unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (op)));
+      machine_mode mode;
+      tree type, bfr;
+      if (bitwise_mode_for_size (sz).exists (&mode)
+	  && known_eq (GET_MODE_BITSIZE (mode), sz))
+	type = bitwise_type_for_mode (mode);
+      else
+	{
+	  mode = VOIDmode;
+	  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (op, 0)));
+	}
+      if (TYPE_ALIGN (type) < TYPE_ALIGN (TREE_TYPE (op)))
+	type = build_aligned_type (type, TYPE_ALIGN (TREE_TYPE (op)));
+      var = create_tmp_var (type);
+      TREE_ADDRESSABLE (var) = 1;
+      gimple *g;
+      if (mode != VOIDmode)
+	{
+	  bfr = build3 (BIT_FIELD_REF, type, TREE_OPERAND (op, 0),
+			TYPE_SIZE (type), TREE_OPERAND (op, 2));
+	  g = gimple_build_assign (make_ssa_name (type),
+				   BIT_FIELD_REF, bfr);
+	  gimple_set_location (g, m_loc);
+	  gsi_insert_after (&m_init_gsi, g, GSI_NEW_STMT);
+	  bfr = gimple_assign_lhs (g);
+	}
+      else
+	bfr = TREE_OPERAND (op, 0);
+      g = gimple_build_assign (var, bfr);
+      gimple_set_location (g, m_loc);
+      gsi_insert_after (&m_init_gsi, g, GSI_NEW_STMT);
+      if (mode == VOIDmode)
+	{
+	  unsigned HOST_WIDE_INT nelts
+	    = CEIL (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (op))), limb_prec);
+	  tree atype = build_array_type_nelts (m_limb_type, nelts);
+	  var = build2 (MEM_REF, atype, build_fold_addr_expr (var),
+			build_int_cst (build_pointer_type (type),
+				       tree_to_uhwi (TREE_OPERAND (op, 2))
+				       / BITS_PER_UNIT));
+	}
+      m_data.safe_push (var);
+    }
+  else
+    var = unshare_expr (m_data[m_data_cnt]);
+  ++m_data_cnt;
+  var = limb_access (TREE_TYPE (op), var, idx, false);
+  tree r = make_ssa_name (m_limb_type);
+  gimple *g = gimple_build_assign (r, var);
+  insert_before (g);
+  return r;
+}
+
 /* Add a new EH edge from SRC to EH_EDGE->dest, where EH_EDGE
    is an older EH edge, and except for virtual PHIs duplicate the
    PHI argument from the EH_EDGE to the new EH edge.  */
@@ -2019,6 +2102,8 @@  bitint_large_huge::handle_stmt (gimple *
 	  return handle_cast (TREE_TYPE (gimple_assign_lhs (stmt)),
 			      TREE_OPERAND (gimple_assign_rhs1 (stmt), 0),
 			      idx);
+	case BIT_FIELD_REF:
+	  return handle_bit_field_ref (gimple_assign_rhs1 (stmt), idx);
 	default:
 	  break;
 	}
--- gcc/testsuite/gcc.dg/bitint-98.c.jj	2024-03-04 19:11:46.355244244 +0100
+++ gcc/testsuite/gcc.dg/bitint-98.c	2024-03-04 19:10:35.139207685 +0100
@@ -0,0 +1,50 @@ 
+/* PR middle-end/114157 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2 -std=c23 -Wno-psabi -w" } */
+
+#if __BITINT_MAXWIDTH__ >= 256
+_BitInt(256) d;
+_BitInt(255) e;
+
+void
+foo (long __attribute__((vector_size (64))) s)
+{
+  __builtin_memmove (&d, &s, sizeof (d));
+}
+
+void
+bar (_BitInt(512) x)
+{
+  long __attribute__((vector_size (64))) s;
+  __builtin_memcpy (&s, &x, sizeof (s));
+  __builtin_memcpy (&d, &s, sizeof (d));
+}
+
+void
+baz (long __attribute__((vector_size (64))) s)
+{
+  _BitInt(256) d;
+  __builtin_memmove (&d, &s, sizeof (d));
+  e = d;
+}
+
+void
+qux (long __attribute__((vector_size (64))) s)
+{
+  _BitInt(192) d;
+  __builtin_memmove (&d, &s, sizeof (d));
+  e = d;
+}
+#else
+int i;
+#endif
+
+#if __BITINT_MAXWIDTH__ >= 1024
+_BitInt(512)
+corge (long __attribute__((vector_size (1024))) s)
+{
+  _BitInt(512) d;
+  __builtin_memcpy (&d, &s, sizeof (d));
+  return d;
+}
+#endif
--- gcc/testsuite/gcc.target/i386/avx2-pr114157.c.jj	2024-03-04 19:12:46.001437331 +0100
+++ gcc/testsuite/gcc.target/i386/avx2-pr114157.c	2024-03-04 19:12:31.639631618 +0100
@@ -0,0 +1,5 @@ 
+/* PR middle-end/114157 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2 -std=c23 -Wno-psabi -w -mavx2 -mno-avx512f" } */
+
+#include "../../gcc.dg/bitint-98.c"
--- gcc/testsuite/gcc.target/i386/avx512f-pr114157.c.jj	2024-03-04 19:13:01.190231847 +0100
+++ gcc/testsuite/gcc.target/i386/avx512f-pr114157.c	2024-03-04 19:13:12.018085362 +0100
@@ -0,0 +1,5 @@ 
+/* PR middle-end/114157 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2 -std=c23 -Wno-psabi -w -mavx512f" } */
+
+#include "../../gcc.dg/bitint-98.c"