
[AArch64,4/6] Implement support for Crypto -- SHA1.

Message ID 52A20B12.6020604@arm.com
State New

Commit Message

Tejas Belagod Dec. 6, 2013, 5:36 p.m. UTC
Hi,

The attached patch implements support for the SHA1 crypto insns.
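
For reference, typical use of the new intrinsics looks like this (an
illustrative sketch only: one four-round SHA-1 step for rounds 0-19;
the helper name is made up for the example):

static __inline uint32x4_t
sha1_rounds_0_3 (uint32x4_t abcd, uint32_t *e, uint32x4_t w0_3)
{
  /* wk = message words plus the SHA-1 round constant for rounds 0-19.  */
  uint32x4_t wk = vaddq_u32 (w0_3, vdupq_n_u32 (0x5a827999));
  /* SHA1H derives the next E from the current A (lane 0).  */
  uint32_t e_next = vsha1h_u32 (vgetq_lane_u32 (abcd, 0));
  /* SHA1C performs four rounds using the "choose" function.  */
  abcd = vsha1cq_u32 (abcd, *e, wk);
  *e = e_next;
  return abcd;
}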

Tested on aarch64-none-elf. OK for trunk?

Thanks,
Tejas.

2013-12-06  Tejas Belagod  <tejas.belagod@arm.com>

gcc/
	* config/aarch64/aarch64-simd-builtins.def: Update builtins table.
	* config/aarch64/aarch64-simd.md (aarch64_crypto_sha1hsi,
	aarch64_crypto_sha1su1v4si, aarch64_crypto_sha1<sha1_op>v4si,
	aarch64_crypto_sha1su0v4si): New.
	* config/aarch64/arm_neon.h (vsha1cq_u32, vsha1mq_u32, vsha1pq_u32,
	vsha1h_u32, vsha1su0q_u32, vsha1su1q_u32): New.
	* config/aarch64/iterators.md (UNSPEC_SHA1<CPMH>, UNSPEC_SHA1SU<01>):
	New.
	(CRYPTO_SHA1): New int iterator.
	(sha1_op): New int attribute.

testsuite/
	* gcc.target/aarch64/sha1.c: New.

Comments

Marcus Shawcroft Dec. 10, 2013, 10:25 a.m. UTC | #1
Same comments as previous patch:

On 6 December 2013 17:36, Tejas Belagod <tbelagod@arm.com> wrote:

> testsuite/
>         * gcc.target/aarch64/sha1.c: New.

Add _1 to the test case file name (see http://gcc.gnu.org/wiki/TestCaseWriting).

> +static __inline uint32x4_t
> +vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
> +{
> +  return
> +    (uint32x4_t) __builtin_aarch64_crypto_sha1cv4si ((int32x4_t) hash_abcd,
> +                                                    (int32_t) hash_e,
> +                                                    (int32x4_t) wk);
> +}

James G fixed the infrastructure to allow properly typed builtins, see:

http://gcc.gnu.org/ml/gcc-patches/2013-11/msg02005.html
and
http://gcc.gnu.org/ml/gcc-patches/2013-11/msg02880.html
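
With the typed-builtin infrastructure the wrappers could drop the casts,
roughly (untested sketch; the _uuuu builtin name is only a guess at what
the properly typed variant would be called):

static __inline uint32x4_t
vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
{
  /* Hypothetical typed builtin: name and qualifiers are illustrative.  */
  return __builtin_aarch64_crypto_sha1cv4si_uuuu (hash_abcd, hash_e, wk);
}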

> +/* { dg-final { scan-assembler "sha1c\\tq" } } */

Use scan-assembler-times 1
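
For example (same pattern as the existing directive, expecting exactly
one match):

/* { dg-final { scan-assembler-times "sha1c\\tq" 1 } } */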

Cheers
/Marcus

Patch

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 49ab482..b0b9bf1 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -367,3 +367,12 @@ 
   VAR1 (BINOP, crypto_aesd, 0, v16qi)
   VAR1 (UNOP, crypto_aesmc, 0, v16qi)
   VAR1 (UNOP, crypto_aesimc, 0, v16qi)
+
+  /* Implemented by aarch64_crypto_sha1<op><mode>.  */
+  VAR1 (UNOP, crypto_sha1h, 0, si)
+  VAR1 (BINOP, crypto_sha1su1, 0, v4si)
+  VAR1 (TERNOP, crypto_sha1c, 0, v4si)
+  VAR1 (TERNOP, crypto_sha1m, 0, v4si)
+  VAR1 (TERNOP, crypto_sha1p, 0, v4si)
+  VAR1 (TERNOP, crypto_sha1su0, 0, v4si)
+
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 4b17748..ab9ad57 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4096,3 +4096,46 @@ 
   [(set_attr "type" "crypto_aes")]
 )
 
+;; sha1
+
+(define_insn "aarch64_crypto_sha1hsi"
+  [(set (match_operand:SI 0 "register_operand" "=w")
+        (unspec:SI [(match_operand:SI 1
+                       "register_operand" "w")]
+         UNSPEC_SHA1H))]
+  "TARGET_SIMD && TARGET_CRYPTO"
+  "sha1h\\t%s0, %s1"
+  [(set_attr "type" "crypto_sha1_fast")]
+)
+
+(define_insn "aarch64_crypto_sha1su1v4si"
+  [(set (match_operand:V4SI 0 "register_operand" "=w")
+        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+                      (match_operand:V4SI 2 "register_operand" "w")]
+         UNSPEC_SHA1SU1))]
+  "TARGET_SIMD && TARGET_CRYPTO"
+  "sha1su1\\t%0.4s, %2.4s"
+  [(set_attr "type" "crypto_sha1_fast")]
+)
+
+(define_insn "aarch64_crypto_sha1<sha1_op>v4si"
+  [(set (match_operand:V4SI 0 "register_operand" "=w")
+        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+                      (match_operand:SI 2 "register_operand" "w")
+                      (match_operand:V4SI 3 "register_operand" "w")]
+         CRYPTO_SHA1))]
+  "TARGET_SIMD && TARGET_CRYPTO"
+  "sha1<sha1_op>\\t%q0, %s2, %3.4s"
+  [(set_attr "type" "crypto_sha1_slow")]
+)
+
+(define_insn "aarch64_crypto_sha1su0v4si"
+  [(set (match_operand:V4SI 0 "register_operand" "=w")
+        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+                      (match_operand:V4SI 2 "register_operand" "w")
+                      (match_operand:V4SI 3 "register_operand" "w")]
+         UNSPEC_SHA1SU0))]
+  "TARGET_SIMD && TARGET_CRYPTO"
+  "sha1su0\\t%0.4s, %2.4s, %3.4s"
+  [(set_attr "type" "crypto_sha1_xor")]
+)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 9f35e09..244abe7 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -23176,6 +23176,58 @@  vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
   return (uint64x1_t) __builtin_aarch64_ursra_ndi (__a, __b, __c);
 }
 
+#ifdef __ARM_FEATURE_CRYPTO
+
+/* vsha1  */
+
+static __inline uint32x4_t
+vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+  return
+    (uint32x4_t) __builtin_aarch64_crypto_sha1cv4si ((int32x4_t) hash_abcd,
+						     (int32_t) hash_e,
+						     (int32x4_t) wk);
+}
+static __inline uint32x4_t
+vsha1mq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+  return
+    (uint32x4_t) __builtin_aarch64_crypto_sha1mv4si ((int32x4_t) hash_abcd,
+						     (int32_t) hash_e,
+						     (int32x4_t) wk);
+}
+static __inline uint32x4_t
+vsha1pq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+  return
+    (uint32x4_t) __builtin_aarch64_crypto_sha1pv4si ((int32x4_t) hash_abcd,
+						     (int32_t) hash_e,
+						     (int32x4_t) wk);
+}
+
+static __inline uint32_t
+vsha1h_u32 (uint32_t hash_e)
+{
+  return (uint32_t)__builtin_aarch64_crypto_sha1hsi (hash_e);
+}
+
+static __inline uint32x4_t
+vsha1su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11)
+{
+  return (uint32x4_t) __builtin_aarch64_crypto_sha1su0v4si ((int32x4_t) w0_3,
+							    (int32x4_t) w4_7,
+							    (int32x4_t) w8_11);
+}
+
+static __inline uint32x4_t
+vsha1su1q_u32 (uint32x4_t tw0_3, uint32x4_t w12_15)
+{
+  return (uint32x4_t) __builtin_aarch64_crypto_sha1su1v4si ((int32x4_t) tw0_3,
+							    (int32x4_t) w12_15);
+}
+
+#endif
+
 /* vshl */
 
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 91d6f74..650b503 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -267,6 +267,12 @@ 
     UNSPEC_AESD         ; Used in aarch64-simd.md.
     UNSPEC_AESMC        ; Used in aarch64-simd.md.
     UNSPEC_AESIMC       ; Used in aarch64-simd.md.
+    UNSPEC_SHA1C	; Used in aarch64-simd.md.
+    UNSPEC_SHA1M        ; Used in aarch64-simd.md.
+    UNSPEC_SHA1P        ; Used in aarch64-simd.md.
+    UNSPEC_SHA1H        ; Used in aarch64-simd.md.
+    UNSPEC_SHA1SU0      ; Used in aarch64-simd.md.
+    UNSPEC_SHA1SU1      ; Used in aarch64-simd.md.
 ])
 
 ;; -------------------------------------------------------------------
@@ -850,6 +856,8 @@ 
 (define_int_iterator CRYPTO_AES [UNSPEC_AESE UNSPEC_AESD])
 (define_int_iterator CRYPTO_AESMC [UNSPEC_AESMC UNSPEC_AESIMC])
 
+(define_int_iterator CRYPTO_SHA1 [UNSPEC_SHA1C UNSPEC_SHA1M UNSPEC_SHA1P])
+
 ;; -------------------------------------------------------------------
 ;; Int Iterators Attributes.
 ;; -------------------------------------------------------------------
@@ -970,3 +978,5 @@ 
 (define_int_attr aes_op [(UNSPEC_AESE "e") (UNSPEC_AESD "d")])
 (define_int_attr aesmc_op [(UNSPEC_AESMC "mc") (UNSPEC_AESIMC "imc")])
 
+(define_int_attr sha1_op [(UNSPEC_SHA1C "c") (UNSPEC_SHA1P "p")
+			  (UNSPEC_SHA1M "m")])
diff --git a/gcc/testsuite/gcc.target/aarch64/sha1.c b/gcc/testsuite/gcc.target/aarch64/sha1.c
new file mode 100644
index 0000000..fefd7e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sha1.c
@@ -0,0 +1,55 @@ 
+
+/* { dg-do compile } */
+/* { dg-options "-march=armv8-a+crypto" } */
+
+#include "arm_neon.h"
+
+uint32x4_t
+test_vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+  return vsha1cq_u32 (hash_abcd, hash_e, wk);
+}
+
+/* { dg-final { scan-assembler "sha1c\\tq" } } */
+
+uint32x4_t
+test_vsha1mq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+  return vsha1mq_u32 (hash_abcd, hash_e, wk);
+}
+
+/* { dg-final { scan-assembler "sha1m\\tq" } } */
+
+uint32x4_t
+test_vsha1pq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+  return vsha1pq_u32 (hash_abcd, hash_e, wk);
+}
+
+/* { dg-final { scan-assembler "sha1p\\tq" } } */
+
+uint32_t
+test_vsha1h_u32 (uint32_t hash_e)
+{
+  return vsha1h_u32 (hash_e);
+}
+
+/* { dg-final { scan-assembler "sha1h\\ts" } } */
+
+uint32x4_t
+test_vsha1su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11)
+{
+  return vsha1su0q_u32 (w0_3, w4_7, w8_11);
+}
+
+/* { dg-final { scan-assembler "sha1su0\\tv" } } */
+
+uint32x4_t
+test_vsha1su1q_u32 (uint32x4_t tw0_3, uint32x4_t w12_15)
+{
+  return vsha1su1q_u32 (tw0_3, w12_15);
+}
+
+/* { dg-final { scan-assembler "sha1su1\\tv" } } */
+
+/* { dg-final { cleanup-saved-temps } } */