@@ -245,6 +245,10 @@ mpass-mrelax-to-as
Target Var(loongarch_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
Pass -mrelax or -mno-relax option to the assembler.
+muse-movcf2gr
+Target Var(loongarch_use_movcf2gr) Init(M_OPT_UNSET)
+Emit the movcf2gr instruction.
+
-param=loongarch-vect-unroll-limit=
Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
Used to limit unroll factor which indicates how much the autovectorizer may
@@ -36,6 +36,8 @@ using array_tune = array<T, N_TUNE_TYPES>;
template <class T>
using array_arch = array<T, N_ARCH_TYPES>;
+const int simple_insn_cost = COSTS_N_INSNS (1);
+
/* CPU property tables. */
array_tune<const char *> loongarch_cpu_strings = array_tune<const char *> ()
.set (CPU_NATIVE, STR_CPU_NATIVE)
@@ -101,15 +103,18 @@ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
int_mult_di (COSTS_N_INSNS (4)),
int_div_si (COSTS_N_INSNS (5)),
int_div_di (COSTS_N_INSNS (5)),
+ movcf2gr (COSTS_N_INSNS (7)),
branch_cost (6),
memory_latency (4) {}
/* The following properties cannot be looked up directly using "cpucfg".
So it is necessary to provide a default value for "unknown native"
tune targets (i.e. -mtune=native while PRID does not correspond to
- any known "-mtune" type). Currently all numbers are default. */
+ any known "-mtune" type). */
array_tune<loongarch_rtx_cost_data> loongarch_cpu_rtx_cost_data =
- array_tune<loongarch_rtx_cost_data> ();
+ array_tune<loongarch_rtx_cost_data> ()
+ .set (CPU_LA664,
+ loongarch_rtx_cost_data ().movcf2gr_ (COSTS_N_INSNS (1)));
/* RTX costs to use when optimizing for size.
We use a value slightly larger than COSTS_N_INSNS (1) for all of them
@@ -125,7 +130,8 @@ const loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size =
.int_mult_si_ (COST_COMPLEX_INSN)
.int_mult_di_ (COST_COMPLEX_INSN)
.int_div_si_ (COST_COMPLEX_INSN)
- .int_div_di_ (COST_COMPLEX_INSN);
+ .int_div_di_ (COST_COMPLEX_INSN)
+ .movcf2gr_ (COST_COMPLEX_INSN);
array_tune<int> loongarch_cpu_issue_rate = array_tune<int> ()
.set (CPU_NATIVE, 4)
@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "loongarch-def-array.h"
+extern const int simple_insn_cost;
+
/* RTX costs of various operations on the different architectures. */
struct loongarch_rtx_cost_data
{
@@ -35,6 +37,7 @@ struct loongarch_rtx_cost_data
unsigned short int_mult_di;
unsigned short int_div_si;
unsigned short int_div_di;
+ unsigned short movcf2gr;
unsigned short branch_cost;
unsigned short memory_latency;
@@ -95,6 +98,12 @@ struct loongarch_rtx_cost_data
return *this;
}
+ loongarch_rtx_cost_data movcf2gr_ (unsigned short _movcf2gr)
+ {
+ movcf2gr = _movcf2gr;
+ return *this;
+ }
+
loongarch_rtx_cost_data branch_cost_ (unsigned short _branch_cost)
{
branch_cost = _branch_cost;
@@ -107,6 +116,11 @@ struct loongarch_rtx_cost_data
return *this;
}
+ bool use_movcf2gr () const
+ {
+ /* If movcf2gr is no more expensive than two li.w and a branch, use it. */
+ return movcf2gr <= simple_insn_cost * 2 + branch_cost;
+ }
};
/* Costs to use when optimizing for size. */
@@ -7528,6 +7528,9 @@ loongarch_option_override_internal (struct gcc_options *opts,
else
loongarch_cost = &loongarch_cpu_rtx_cost_data[la_target.cpu_tune];
+ if (loongarch_use_movcf2gr == M_OPT_UNSET)
+ loongarch_use_movcf2gr = loongarch_cost->use_movcf2gr ();
+
/* If the user hasn't specified a branch cost, use the processor's
default. */
if (loongarch_branch_cost == 0)
@@ -3169,6 +3169,42 @@ (define_insn "s<code>_<ANYF:mode>_using_FCCmode"
[(set_attr "type" "fcmp")
(set_attr "mode" "FCC")])
+(define_insn "movcf2gr<GPR:mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=r")
+ (if_then_else:GPR (ne (match_operand:FCC 1 "register_operand" "z")
+ (const_int 0))
+ (const_int 1)
+ (const_int 0)))]
+ "TARGET_HARD_FLOAT && loongarch_use_movcf2gr"
+ "movcf2gr\t%0,%1"
+ [(set_attr "type" "move")
+ (set_attr "mode" "FCC")])
+
+(define_expand "cstore<ANYF:mode>4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "loongarch_fcmp_operator"
+ [(match_operand:ANYF 2 "register_operand")
+ (match_operand:ANYF 3 "register_operand")]))]
+ "loongarch_use_movcf2gr"
+ {
+ rtx fcc = gen_reg_rtx (FCCmode);
+ rtx cmp = gen_rtx_fmt_ee (GET_CODE (operands[1]), FCCmode,
+ operands[2], operands[3]);
+
+ emit_insn (gen_rtx_SET (fcc, cmp));
+ if (TARGET_64BIT)
+ {
+ rtx gpr = gen_reg_rtx (DImode);
+ emit_insn (gen_movcf2grdi (gpr, fcc));
+ emit_insn (gen_rtx_SET (operands[0],
+ lowpart_subreg (SImode, gpr, DImode)));
+ }
+ else
+ emit_insn (gen_movcf2grsi (operands[0], fcc));
+
+ DONE;
+ })
+
;;
;; ....................
@@ -253,6 +253,10 @@ mpass-mrelax-to-as
Target Var(loongarch_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
Pass -mrelax or -mno-relax option to the assembler.
+muse-movcf2gr
+Target Var(loongarch_use_movcf2gr) Init(M_OPT_UNSET)
+Emit the movcf2gr instruction.
+
-param=loongarch-vect-unroll-limit=
Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
Used to limit unroll factor which indicates how much the autovectorizer may
@@ -590,6 +590,10 @@ (define_predicate "order_operator"
(define_predicate "loongarch_cstore_operator"
(match_code "ne,eq,gt,gtu,ge,geu,lt,ltu,le,leu"))
+(define_predicate "loongarch_fcmp_operator"
+ (match_code
+ "unordered,uneq,unlt,unle,eq,lt,le,ordered,ltgt,ne,ge,gt,unge,ungt"))
+
(define_predicate "small_data_pattern"
(and (match_code "set,parallel,unspec,unspec_volatile,prefetch")
(match_test "loongarch_small_data_pattern_p (op)")))
@@ -26811,6 +26811,14 @@ Enable the approximation for vectorized reciprocal square root.
So, for example, @option{-mrecip=all,!sqrt} enables
all of the reciprocal approximations, except for scalar square root.
+@item -muse-movcf2gr
+@itemx -mno-use-movcf2gr
+Use (do not use) the @code{movcf2gr} instruction.  The default depends
+on the setting of the @option{-mtune=} option:
+@option{-muse-movcf2gr} if tuning for a microarchitecture where the
+@code{movcf2gr} instruction is faster than a @code{bceqz} or @code{bcnez}
+branch setting a GPR to 0 or 1; @option{-mno-use-movcf2gr} otherwise.
+
@item loongarch-vect-unroll-limit
The vectorizer will use available tuning information to determine whether it
would be beneficial to unroll the main vectorized loop and by how much. This
new file mode 100644
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=loongarch64 -mtune=la664 -mabi=lp64d" } */
+/* { dg-final { scan-assembler "movcf2gr\t\\\$r4,\\\$fcc" } } */
+
+int
+t (float a, float b)
+{
+ return a > b;
+}