[AArch64] Optimize x * copysign (1.0, y) [Patch (2/2)]

Message ID VI1PR0801MB2031E8D36B1D7B456866D71EFFCD0@VI1PR0801MB2031.eurprd08.prod.outlook.com

Commit Message

Tamar Christina June 12, 2017, 7:56 a.m. UTC
Hi All,

This patch implements an optimization that rewrites

x * copysign (1.0, y) and 
x * copysign (-1.0, y) 

to:

x ^ (y & (1 << sign_bit_position))
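
For illustration (this helper is mine, not part of the patch), the
identity can be checked in plain C by treating the float's bits as an
unsigned integer:

	/* Illustrative sketch only: x * copysignf (1.0f, y) expressed as
	   a bitwise operation on IEEE single-precision values.  */
	#include <stdint.h>
	#include <string.h>

	static float
	xorsignf (float x, float y)
	{
	  uint32_t xi, yi;
	  memcpy (&xi, &x, sizeof (xi));
	  memcpy (&yi, &y, sizeof (yi));
	  /* Keep only y's sign bit (bit 31) and use it to flip x's
	     sign bit.  */
	  xi ^= yi & (UINT32_C (1) << 31);
	  memcpy (&x, &xi, sizeof (x));
	  return x;
	}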

The patch provides AArch64 implementations of the xorsign optab, both
vector and scalar.
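
As an example of the vector case (my example, not taken from the
patch), a loop such as:

	void
	f (float *restrict a, float *restrict b, float *restrict r, int n)
	{
	  for (int i = 0; i < n; i++)
	    r[i] = a[i] * __builtin_copysignf (1.0f, b[i]);
	}

should now be vectorizable through the new optab instead of requiring a
floating-point multiply.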

This patch is a revival of an earlier patch:
https://gcc.gnu.org/ml/gcc-patches/2015-10/msg00069.html

Bootstrapped on both aarch64-none-linux-gnu and x86_64 with no issues.
Regression tested on aarch64-none-linux-gnu with no regressions.

With this patch, GCC now generates on AArch64:

	movi	v2.2s, 0x80, lsl 24
	and	v1.8b, v1.8b, v2.8b
	eor	v0.8b, v0.8b, v1.8b

instead of the previous:

	fmov	s2, 1.0e+0
	mov	x0, 2147483648
	fmov	d3, x0
	bsl	v3.8b, v1.8b, v2.8b
	fmul	s0, s0, s3
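
Both sequences correspond to a scalar function presumably along the
lines of:

	float
	f (float x, float y)
	{
	  return x * __builtin_copysignf (1.0f, y);
	}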

Ok for trunk?

gcc/
2017-06-07  Tamar Christina  <tamar.christina@arm.com>

	* config/aarch64/aarch64.md (xorsign<mode>3): New expander.
	* config/aarch64/aarch64-builtins.c
	(aarch64_builtin_vectorized_function): Handle CASE_CFN_XORSIGN.
	* config/aarch64/aarch64-simd-builtins.def: Add xorsign BINOP.
	* config/aarch64/aarch64-simd.md (xorsign<mode>3): New expander.

Patch

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index f09399f4c158112c90c270856bffb4cafd03e7d4..8a2e214db2bd590fc809cf8c58bfe4aca2af9bef 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -1432,6 +1432,15 @@  aarch64_builtin_vectorized_function (unsigned int fn, tree type_out,
       return AARCH64_FIND_FRINT_VARIANT (nearbyint);
     CASE_CFN_SQRT:
       return AARCH64_FIND_FRINT_VARIANT (sqrt);
+    CASE_CFN_XORSIGN:
+      if (AARCH64_CHECK_BUILTIN_MODE (2, S))
+	return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_BINOP_xorsignv2sf];
+      else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
+	return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_BINOP_xorsignv4sf];
+      else if (AARCH64_CHECK_BUILTIN_MODE (2, D))
+	return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_BINOP_xorsignv2df];
+      else
+	return NULL_TREE;
 #undef AARCH64_CHECK_BUILTIN_MODE
 #define AARCH64_CHECK_BUILTIN_MODE(C, N) \
   (out_mode == SImode && out_n == C \
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index d713d5d8b88837ec6f2dc51188fb252f8d5bc8bd..b7f50b849dba8d788be142cd839c4a5560e9204e 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -151,6 +151,9 @@ 
   BUILTIN_VQN (TERNOP, raddhn2, 0)
   BUILTIN_VQN (TERNOP, rsubhn2, 0)
 
+  /* Implemented by xorsign<mode>3.  */
+  BUILTIN_VHSDF (BINOP, xorsign, 3)
+
   BUILTIN_VSQN_HSDI (UNOP, sqmovun, 0)
   /* Implemented by aarch64_<sur>qmovn<mode>.  */
   BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index c5a86ff6f7196eb634be426ecea97cdfbfc7a7a4..1e92fa1b54a592db5dde9048e51988c03ece141c 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -351,6 +351,34 @@ 
   }
 )
 
+(define_expand "xorsign<mode>3"
+  [(match_operand:VHSDF 0 "register_operand")
+   (match_operand:VHSDF 1 "register_operand")
+   (match_operand:VHSDF 2 "register_operand")]
+  "TARGET_SIMD"
+{
+  machine_mode imode = <V_cmp_result>mode;
+  rtx v_bitmask = gen_reg_rtx (imode);
+  rtx op1x = gen_reg_rtx (imode);
+  rtx op2x = gen_reg_rtx (imode);
+
+  rtx arg1 = lowpart_subreg (imode, operands[1], <MODE>mode);
+  rtx arg2 = lowpart_subreg (imode, operands[2], <MODE>mode);
+
+  int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
+
+  emit_move_insn (v_bitmask,
+		  aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
+						     HOST_WIDE_INT_M1U << bits));
+
+  emit_insn (gen_and<v_cmp_result>3 (op2x, v_bitmask, arg2));
+  emit_insn (gen_xor<v_cmp_result>3 (op1x, arg1, op2x));
+  emit_move_insn (operands[0],
+		  lowpart_subreg (<MODE>mode, op1x, imode));
+  DONE;
+}
+)
+
 (define_expand "copysign<mode>3"
   [(match_operand:VHSDF 0 "register_operand")
    (match_operand:VHSDF 1 "register_operand")
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 2e9331fd72b3f36270b8741d97fb3275b4bf2657..8ecdae41a2f4ec42cf28dc6309f3e69fe74ba39d 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -4988,6 +4988,41 @@ 
 }
 )
 
+;; For xorsign (x, y), we want to generate:
+;;
+;; LDR   d2, #1<<63
+;; AND   v3.8B, v1.8B, v2.8B
+;; EOR   v0.8B, v0.8B, v3.8B
+;;
+
+(define_expand "xorsign<mode>3"
+  [(match_operand:GPF 0 "register_operand")
+   (match_operand:GPF 1 "register_operand")
+   (match_operand:GPF 2 "register_operand")]
+  "TARGET_FLOAT && TARGET_SIMD"
+{
+  machine_mode imode = <V_cmp_result>mode;
+  rtx mask = gen_reg_rtx (imode);
+  rtx op1x = gen_reg_rtx (imode);
+  rtx op2x = gen_reg_rtx (imode);
+
+  int bits = GET_MODE_BITSIZE (<MODE>mode) - 1;
+  emit_move_insn (mask, GEN_INT (trunc_int_for_mode (HOST_WIDE_INT_M1U << bits,
+						     imode)));
+
+  emit_insn (gen_and<v_cmp_result>3 (op2x, mask,
+				     lowpart_subreg (imode, operands[2],
+						     <MODE>mode)));
+  emit_insn (gen_xor<v_cmp_result>3 (op1x,
+				     lowpart_subreg (imode, operands[1],
+						     <MODE>mode),
+				     op2x));
+  emit_move_insn (operands[0],
+		  lowpart_subreg (<MODE>mode, op1x, imode));
+  DONE;
+}
+)
+
 ;; -------------------------------------------------------------------
 ;; Reload support
 ;; -------------------------------------------------------------------