[5/7] arm: suppress aes erratum when forwarding from aes

Message ID 20220120112724.830872-6-rearnsha@arm.com
State New
Series Arm: mitigation for AES erratum on Cortex-A57 and Cortex-A72

Commit Message

Richard Earnshaw Jan. 20, 2022, 11:27 a.m. UTC
AES operations are commonly chained, and since the result of one AES
operation is never a 32-bit value, no additional mitigation instruction
is needed for the forwarded result.  We handle this common case by
adding patterns that allow the mitigation to be omitted when one AES
instruction feeds directly into another.

gcc/ChangeLog:

	* config/arm/crypto.md (crypto_<CRYPTO_AESMC:crypto_pattern>_protected):
	New pattern.
	(aarch32_crypto_aese_fused_protected): Likewise.
	(aarch32_crypto_aesd_fused_protected): Likewise.
---
 gcc/config/arm/crypto.md | 50 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

Patch

diff --git a/gcc/config/arm/crypto.md b/gcc/config/arm/crypto.md
index fbee1829ce8..df857352382 100644
--- a/gcc/config/arm/crypto.md
+++ b/gcc/config/arm/crypto.md
@@ -75,6 +75,20 @@  (define_insn "aes_op_protect"
   [(set_attr "type" "neon_move_q")]
 )
 
+;; An AESMC operation can feed directly into a subsequent AES
+;; operation without needing mitigation.
+(define_insn "*crypto_<CRYPTO_AESMC:crypto_pattern>_protected"
+  [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+	(unspec:<crypto_mode>
+	 [(unspec:<crypto_mode>
+	   [(match_operand:<crypto_mode> 1 "register_operand" "w")]
+	   CRYPTO_AESMC)]
+	 UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098"
+  "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
+  [(set_attr "type" "<crypto_type>")]
+)
+
 ;; When AESE/AESMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves inbetween.
@@ -95,6 +109,25 @@  (define_insn "*aarch32_crypto_aese_fused"
    (set_attr "length" "8")]
 )
 
+;; And similarly when mitigation is enabled, but not needed in this
+;; case.
+(define_insn "*aarch32_crypto_aese_fused_protected"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+	(unspec:V16QI
+	 [(unspec:V16QI
+	   [(unspec:V16QI [(xor:V16QI
+			    (match_operand:V16QI 1 "register_operand" "%0")
+			    (match_operand:V16QI 2 "register_operand" "w"))]
+	     UNSPEC_AESE)]
+	   UNSPEC_AESMC)]
+	 UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098
+   && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
+  "aese.8\\t%q0, %q2\;aesmc.8\\t%q0, %q0"
+  [(set_attr "type" "crypto_aese")
+   (set_attr "length" "8")]
+)
+
 ;; When AESD/AESIMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves inbetween.
@@ -115,6 +148,23 @@  (define_insn "*aarch32_crypto_aesd_fused"
    (set_attr "length" "8")]
 )
 
+(define_insn "*aarch32_crypto_aesd_fused_protected"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+	(unspec:V16QI
+	 [(unspec:V16QI
+	   [(unspec:V16QI [(xor:V16QI
+			    (match_operand:V16QI 1 "register_operand" "%0")
+			    (match_operand:V16QI 2 "register_operand" "w"))]
+	     UNSPEC_AESD)]
+	   UNSPEC_AESIMC)]
+	 UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098
+   && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
+  "aesd.8\\t%q0, %q2\;aesimc.8\\t%q0, %q0"
+  [(set_attr "type" "crypto_aese")
+   (set_attr "length" "8")]
+)
+
 (define_insn "crypto_<CRYPTO_BINARY:crypto_pattern>"
   [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
 	(unspec:<crypto_mode>