diff mbox series

[3/4] AArch64: add new alternative with early clobber to patterns

Message ID ZkSOdp2buUMweuu9@arm.com
State New
Headers show
Series AArch64: support conditional early clobbers on certain operations. | expand

Commit Message

Tamar Christina May 15, 2024, 10:29 a.m. UTC
Hi All,

This patch adds new alternatives to the patterns which are affected.  The new
alternatives with the conditional early clobbers are added before the normal
ones in order for LRA to prefer them in the event that we have enough free
registers to accommodate them.

In case register pressure is too high the normal alternatives will be preferred
before a reload is considered, as we would rather have the tie than a spill.

Tests are in the next patch.

Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* config/aarch64/aarch64-sve.md (and<mode>3,
	@aarch64_pred_<optab><mode>_z, *<optab><mode>3_cc,
	*<optab><mode>3_ptest, aarch64_pred_<nlogical><mode>_z,
	*<nlogical><mode>3_cc, *<nlogical><mode>3_ptest,
	aarch64_pred_<logical_nn><mode>_z, *<logical_nn><mode>3_cc,
	*<logical_nn><mode>3_ptest, @aarch64_pred_cmp<cmp_op><mode>,
	*cmp<cmp_op><mode>_cc, *cmp<cmp_op><mode>_ptest,
	@aarch64_pred_cmp<cmp_op><mode>_wide,
	*aarch64_pred_cmp<cmp_op><mode>_wide_cc,
	*aarch64_pred_cmp<cmp_op><mode>_wide_ptest, @aarch64_brk<brk_op>,
	*aarch64_brk<brk_op>_cc, *aarch64_brk<brk_op>_ptest,
	@aarch64_brk<brk_op>, *aarch64_brkn_cc, *aarch64_brkn_ptest,
	*aarch64_brk<brk_op>_cc, *aarch64_brk<brk_op>_ptest,
	aarch64_rdffr_z, *aarch64_rdffr_z_ptest, *aarch64_rdffr_ptest,
	*aarch64_rdffr_z_cc, *aarch64_rdffr_cc): Add new early clobber
	alternative.
	* config/aarch64/aarch64-sve2.md
	(@aarch64_pred_<sve_int_op><mode>): Likewise.

---




--
diff mbox series

Patch

diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 839ab0627747d7a49bef7b0192ee9e7a42587ca0..93ec59e58afee260b85082c472db2abfea7386b6 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1161,8 +1161,9 @@  (define_insn "aarch64_rdffr_z"
 	  (reg:VNx16BI FFRT_REGNUM)
 	  (match_operand:VNx16BI 1 "register_operand")))]
   "TARGET_SVE && TARGET_NON_STREAMING"
-  {@ [ cons: =0, 1   ]
-     [ Upa     , Upa ] rdffr\t%0.b, %1/z
+  {@ [ cons: =0, 1  ; attrs: pred_clobber ]
+     [ &Upa    , Upa; yes                 ] rdffr\t%0.b, %1/z
+     [ Upa     , Upa; *                   ] ^
   }
 )
 
@@ -1179,8 +1180,9 @@  (define_insn "*aarch64_rdffr_z_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE && TARGET_NON_STREAMING"
-  {@ [ cons: =0, 1  , 2 ]
-     [ Upa     , Upa,   ] rdffrs\t%0.b, %1/z
+  {@ [ cons: =0, 1  , 2; attrs: pred_clobber ]
+     [ &Upa    , Upa,  ; yes                 ] rdffrs\t%0.b, %1/z
+     [ Upa     , Upa,  ; *                   ] ^
   }
 )
 
@@ -1195,8 +1197,9 @@  (define_insn "*aarch64_rdffr_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE && TARGET_NON_STREAMING"
-  {@ [ cons: =0, 1   ]
-     [ Upa     , Upa ] rdffrs\t%0.b, %1/z
+  {@ [ cons: =0, 1  ; attrs: pred_clobber ]
+     [ &Upa    , Upa; yes                 ] rdffrs\t%0.b, %1/z
+     [ Upa     , Upa; *                   ] ^
   }
 )
 
@@ -1216,8 +1219,9 @@  (define_insn "*aarch64_rdffr_z_cc"
 	  (reg:VNx16BI FFRT_REGNUM)
 	  (match_dup 1)))]
   "TARGET_SVE && TARGET_NON_STREAMING"
-  {@ [ cons: =0, 1  , 2 ]
-     [ Upa     , Upa,   ] rdffrs\t%0.b, %1/z
+  {@ [ cons: =0, 1  , 2; attrs: pred_clobber ]
+     [ &Upa    , Upa,  ; yes                 ] rdffrs\t%0.b, %1/z
+     [ Upa     , Upa,  ; *                   ] ^
   }
 )
 
@@ -1233,8 +1237,9 @@  (define_insn "*aarch64_rdffr_cc"
    (set (match_operand:VNx16BI 0 "register_operand")
 	(reg:VNx16BI FFRT_REGNUM))]
   "TARGET_SVE && TARGET_NON_STREAMING"
-  {@ [ cons: =0, 1  , 2 ]
-     [ Upa     , Upa,   ] rdffrs\t%0.b, %1/z
+  {@ [ cons: =0, 1  , 2; attrs: pred_clobber ]
+     [ &Upa    , Upa,  ; yes                 ] rdffrs\t%0.b, %1/z
+     [ Upa     , Upa,  ; *                   ] ^
   }
 )
 
@@ -6651,8 +6656,9 @@  (define_insn "and<mode>3"
 	(and:PRED_ALL (match_operand:PRED_ALL 1 "register_operand")
 		      (match_operand:PRED_ALL 2 "register_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2   ]
-     [ Upa     , Upa, Upa ] and\t%0.b, %1/z, %2.b, %2.b
+  {@ [ cons: =0, 1  , 2  ; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa; yes                 ] and\t%0.b, %1/z, %2.b, %2.b
+     [ Upa     , Upa, Upa; *                   ] ^
   }
 )
 
@@ -6679,8 +6685,9 @@  (define_insn "@aarch64_pred_<optab><mode>_z"
 	    (match_operand:PRED_ALL 3 "register_operand"))
 	  (match_operand:PRED_ALL 1 "register_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3   ]
-     [ Upa     , Upa, Upa, Upa ] <logical>\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  ; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa; yes                 ] <logical>\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa; *                   ] ^
   }
 )
 
@@ -6703,8 +6710,9 @@  (define_insn "*<optab><mode>3_cc"
 	(and:PRED_ALL (LOGICAL:PRED_ALL (match_dup 2) (match_dup 3))
 		      (match_dup 4)))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4, 5 ]
-     [ Upa     , Upa, Upa, Upa,  ,   ] <logical>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ,  ; yes                 ] <logical>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -6723,8 +6731,9 @@  (define_insn "*<optab><mode>3_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4, 5 ]
-     [ Upa     , Upa, Upa, Upa,  ,   ] <logical>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ,  ; yes                 ] <logical>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -6745,8 +6754,9 @@  (define_insn "aarch64_pred_<nlogical><mode>_z"
 	    (match_operand:PRED_ALL 2 "register_operand"))
 	  (match_operand:PRED_ALL 1 "register_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3   ]
-     [ Upa     , Upa, Upa, Upa ] <nlogical>\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  ; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa; yes                 ] <nlogical>\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa; *                   ] ^
   }
 )
 
@@ -6770,8 +6780,9 @@  (define_insn "*<nlogical><mode>3_cc"
 			(match_dup 2))
 		      (match_dup 4)))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4, 5 ]
-     [ Upa     , Upa, Upa, Upa,  ,   ] <nlogical>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ,  ; yes                 ] <nlogical>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -6791,8 +6802,9 @@  (define_insn "*<nlogical><mode>3_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE"
-  {@ [ cons:  =0, 1  , 2  , 3  , 4, 5 ]
-     [ Upa      , Upa, Upa, Upa,  ,   ] <nlogical>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons:  =0, 1  , 2  , 3  , 4, 5; attrs: pred_clobber ]
+     [ &Upa     , Upa, Upa, Upa,  ,  ; yes                 ] <nlogical>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa      , Upa, Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -6813,8 +6825,9 @@  (define_insn "aarch64_pred_<logical_nn><mode>_z"
 	    (not:PRED_ALL (match_operand:PRED_ALL 3 "register_operand")))
 	  (match_operand:PRED_ALL 1 "register_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3   ]
-     [ Upa     , Upa, Upa, Upa ] <logical_nn>\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  ; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa; yes                 ] <logical_nn>\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa; *                   ] ^
   }
 )
 
@@ -6839,8 +6852,9 @@  (define_insn "*<logical_nn><mode>3_cc"
 			(not:PRED_ALL (match_dup 3)))
 		      (match_dup 4)))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4, 5 ]
-     [ Upa     , Upa, Upa, Upa,  ,   ] <logical_nn>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ,  ; yes                 ] <logical_nn>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -6861,8 +6875,9 @@  (define_insn "*<logical_nn><mode>3_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4, 5 ]
-     [ Upa     , Upa, Upa, Upa,  ,   ] <logical_nn>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ,  ; yes                 ] <logical_nn>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -8104,9 +8119,11 @@  (define_insn "@aarch64_pred_cmp<cmp_op><mode>"
 	  UNSPEC_PRED_Z))
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1   , 3 , 4              ]
-     [ Upa      , Upl , w , <sve_imm_con>  ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
-     [ Upa      , Upl , w , w              ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+  {@ [ cons: =0 , 1   , 3 , 4            ; attrs: pred_clobber ]
+     [ &Upa     , Upl , w , <sve_imm_con>; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, #%4
+     [ Upa      , Upl , w , <sve_imm_con>; *                   ] ^
+     [ &Upa     , Upl , w , w            ; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+     [ Upa      , Upl , w , w            ; *                   ] ^
   }
 )
 
@@ -8136,9 +8153,11 @@  (define_insn_and_rewrite "*cmp<cmp_op><mode>_cc"
 	  UNSPEC_PRED_Z))]
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
-  {@ [ cons: =0 , 1   , 2 , 3              ]
-     [ Upa      , Upl , w , <sve_imm_con>  ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
-     [ Upa      , Upl , w , w              ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1    , 2 , 3            ; attrs: pred_clobber ]
+     [ &Upa     ,  Upl , w , <sve_imm_con>; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+     [ Upa      ,  Upl , w , <sve_imm_con>; *                   ] ^
+     [ &Upa     ,  Upl , w , w            ; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+     [ Upa      ,  Upl , w , w            ; *                   ] ^
   }
   "&& !rtx_equal_p (operands[4], operands[6])"
   {
@@ -8166,9 +8185,11 @@  (define_insn_and_rewrite "*cmp<cmp_op><mode>_ptest"
    (clobber (match_scratch:<VPRED> 0))]
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
-  {@ [ cons: =0, 1  , 2 , 3              ]
-     [ Upa     , Upl, w , <sve_imm_con>  ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
-     [ Upa     , Upl, w , w              ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0, 1   , 2 , 3            ; attrs: pred_clobber ]
+     [ &Upa    ,  Upl, w , <sve_imm_con>; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, #%3
+     [ Upa     ,  Upl, w , <sve_imm_con>; *                   ] ^
+     [ &Upa    ,  Upl, w , w            ; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>
+     [ Upa     ,  Upl, w , w            ; *                   ] ^
   }
   "&& !rtx_equal_p (operands[4], operands[6])"
   {
@@ -8221,8 +8242,9 @@  (define_insn "@aarch64_pred_cmp<cmp_op><mode>_wide"
 	  UNSPEC_PRED_Z))
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2, 3, 4 ]
-     [ Upa     , Upl,  , w, w ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.d
+  {@ [ cons: =0, 1   , 2, 3, 4; attrs: pred_clobber ]
+     [ &Upa    ,  Upl,  , w, w; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.d
+     [ Upa     ,  Upl,  , w, w; *                   ] ^
   }
 )
 
@@ -8254,8 +8276,9 @@  (define_insn "*aarch64_pred_cmp<cmp_op><mode>_wide_cc"
 	  UNSPEC_PRED_Z))]
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
-  {@ [ cons: =0, 1  , 2, 3, 4, 5, 6  , 7 ]
-     [ Upa     , Upl, w, w,  ,  , Upl,   ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.d
+  {@ [ cons: =0, 1   , 2, 3, 4, 5, 6  , 7; attrs: pred_clobber ]
+     [ &Upa    ,  Upl, w, w,  ,  , Upl,  ; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.d
+     [ Upa     ,  Upl, w, w,  ,  , Upl,  ; *                   ] ^
   }
 )
 
@@ -8279,8 +8302,9 @@  (define_insn "*aarch64_pred_cmp<cmp_op><mode>_wide_ptest"
    (clobber (match_scratch:<VPRED> 0))]
   "TARGET_SVE
    && aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
-  {@ [ cons:  =0, 1  , 2, 3, 4, 5, 6  , 7 ]
-     [ Upa      , Upl, w, w,  ,  , Upl,   ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.d
+  {@ [ cons:  =0, 1   , 2, 3, 4, 5, 6  , 7; attrs: pred_clobber ]
+     [ &Upa     ,  Upl, w, w,  ,  , Upl,  ; yes                 ] cmp<cmp_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.d
+     [ Upa      ,  Upl, w, w,  ,  , Upl,  ; *                   ] ^
   }
 )
 
@@ -9948,9 +9972,11 @@  (define_insn "@aarch64_brk<brk_op>"
 	   (match_operand:VNx16BI 3 "aarch64_simd_reg_or_zero")]
 	  SVE_BRK_UNARY))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1   , 2   , 3   ]
-     [ Upa      , Upa , Upa , Dz  ] brk<brk_op>\t%0.b, %1/z, %2.b
-     [ Upa      , Upa , Upa , 0   ] brk<brk_op>\t%0.b, %1/m, %2.b
+  {@ [ cons: =0 , 1   , 2   , 3  ; attrs: pred_clobber ]
+     [ &Upa     ,  Upa , Upa , Dz; yes                 ] brk<brk_op>\t%0.b, %1/z, %2.b
+     [ Upa      ,  Upa , Upa , Dz; *                   ] ^
+     [ &Upa     ,  Upa , Upa , 0 ; yes                 ] brk<brk_op>\t%0.b, %1/m, %2.b
+     [ Upa      ,  Upa , Upa , 0 ; *                   ] ^
   }
 )
 
@@ -9974,8 +10000,9 @@  (define_insn "*aarch64_brk<brk_op>_cc"
 	   (match_dup 3)]
 	  SVE_BRK_UNARY))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3, 4 ]
-     [ Upa     , Upa, Upa,  ,   ] brk<brk_op>s\t%0.b, %1/z, %2.b
+  {@ [ cons: =0, 1  , 2  , 3, 4; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa,  ,  ; yes                 ] brk<brk_op>s\t%0.b, %1/z, %2.b
+     [ Upa     , Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -9994,8 +10021,9 @@  (define_insn "*aarch64_brk<brk_op>_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3, 4 ]
-     [ Upa     , Upa, Upa,  ,   ] brk<brk_op>s\t%0.b, %1/z, %2.b
+  {@ [ cons: =0, 1  , 2  , 3, 4; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa,  ,  ; yes                 ] brk<brk_op>s\t%0.b, %1/z, %2.b
+     [ Upa     , Upa, Upa,  ,  ; *                   ] ^
   }
 )
 
@@ -10020,8 +10048,9 @@  (define_insn "@aarch64_brk<brk_op>"
 	   (match_operand:VNx16BI 3 "register_operand")]
 	  SVE_BRK_BINARY))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3             ]
-     [ Upa     , Upa, Upa, <brk_reg_con> ] brk<brk_op>\t%0.b, %1/z, %2.b, %<brk_reg_opno>.b
+  {@ [ cons: =0, 1  , 2  , 3            ; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, <brk_reg_con>; yes                 ] brk<brk_op>\t%0.b, %1/z, %2.b, %<brk_reg_opno>.b
+     [ Upa     , Upa, Upa, <brk_reg_con>; *                   ] ^
   }
 )
 
@@ -10046,8 +10075,9 @@  (define_insn_and_rewrite "*aarch64_brkn_cc"
 	   (match_dup 3)]
 	  UNSPEC_BRKN))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3, 4, 5 ]
-     [ Upa     , Upa, Upa, 0,  ,   ] brkns\t%0.b, %1/z, %2.b, %0.b
+  {@ [ cons: =0, 1  , 2  , 3, 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, 0,  ,  ; yes                 ] brkns\t%0.b, %1/z, %2.b, %0.b
+     [ Upa     , Upa, Upa, 0,  ,  ; *                   ] ^
   }
   "&& (operands[4] != CONST0_RTX (VNx16BImode)
        || operands[5] != CONST0_RTX (VNx16BImode))"
@@ -10072,8 +10102,9 @@  (define_insn_and_rewrite "*aarch64_brkn_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3, 4, 5 ]
-     [ Upa     , Upa, Upa, 0,  ,   ] brkns\t%0.b, %1/z, %2.b, %0.b
+  {@ [ cons: =0, 1  , 2  , 3, 4, 5; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, 0,  ,  ; yes                 ] brkns\t%0.b, %1/z, %2.b, %0.b
+     [ Upa     , Upa, Upa, 0,  ,  ; *                   ] ^
   }
   "&& (operands[4] != CONST0_RTX (VNx16BImode)
        || operands[5] != CONST0_RTX (VNx16BImode))"
@@ -10103,8 +10134,9 @@  (define_insn "*aarch64_brk<brk_op>_cc"
 	   (match_dup 3)]
 	  SVE_BRKP))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4 ]
-     [ Upa     , Upa, Upa, Upa,   ] brk<brk_op>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ; yes                 ] brk<brk_op>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ; *                   ] ^
   }
 )
 
@@ -10123,8 +10155,9 @@  (define_insn "*aarch64_brk<brk_op>_ptest"
 	  UNSPEC_PTEST))
    (clobber (match_scratch:VNx16BI 0))]
   "TARGET_SVE"
-  {@ [ cons: =0, 1  , 2  , 3  , 4 ]
-     [ Upa     , Upa, Upa, Upa,   ] brk<brk_op>s\t%0.b, %1/z, %2.b, %3.b
+  {@ [ cons: =0, 1  , 2  , 3  , 4; attrs: pred_clobber ]
+     [ &Upa    , Upa, Upa, Upa,  ; yes                 ] brk<brk_op>s\t%0.b, %1/z, %2.b, %3.b
+     [ Upa     , Upa, Upa, Upa,  ; *                   ] ^
   }
 )
 
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index aa12baf48355358ca4fefe88157df3aac6eb09bd..771c346b8a3188dd7e3f3a98ee28f0ca5f928215 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -3349,8 +3349,9 @@  (define_insn "@aarch64_pred_<sve_int_op><mode>"
 	  UNSPEC_PRED_Z))
    (clobber (reg:CC_NZC CC_REGNUM))]
   "TARGET_SVE2 && TARGET_NON_STREAMING"
-  {@ [ cons: =0, 1 , 2, 3, 4 ]
-     [ Upa     , Upl, , w, w ] <sve_int_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+  {@ [ cons: =0, 1 , 2, 3, 4; attrs: pred_clobber ]
+     [ &Upa    , Upl, , w, w; yes                 ] <sve_int_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>
+     [ Upa     , Upl, , w, w; *                   ] ^
   }
 )