@@ -468,13 +468,13 @@ (define_insn_reservation "power10-qpmul" 24
(eq_attr "cpu" "power10"))
"DU_super_power10,dfu_power10*8")
-(define_insn_reservation "power10-mffgpr" 2
- (and (eq_attr "type" "mffgpr")
+(define_insn_reservation "power10-mtvsr" 2
+ (and (eq_attr "type" "mtvsr")
(eq_attr "cpu" "power10"))
"DU_slice_3_power10,VSU_power10")
-(define_insn_reservation "power10-mftgpr" 2
- (and (eq_attr "type" "mftgpr")
+(define_insn_reservation "power10-mfvsr" 2
+ (and (eq_attr "type" "mfvsr")
(eq_attr "cpu" "power10"))
"DU_slice_3_power10,VSU_power10")
@@ -56,10 +56,6 @@ (define_reservation "LX2_power6"
(define_reservation "FX2_power6"
"iu1_power6+iu2_power6")
-(define_reservation "X2F_power6"
- "(iu1_power6+iu2_power6+fpu1_power6)\
- |(iu1_power6+iu2_power6+fpu2_power6)")
-
(define_reservation "BX2_power6"
"iu1_power6+iu2_power6+bpu_power6")
@@ -605,20 +601,3 @@ (define_bypass 6 "power6-vecperm" "power6-veccomplex" )
(define_bypass 5 "power6-vecperm" "power6-vecstore" )
-(define_insn_reservation "power6-mftgpr" 8
- (and (eq_attr "type" "mftgpr")
- (eq_attr "cpu" "power6"))
- "X2F_power6")
-
-(define_insn_reservation "power6-mffgpr" 14
- (and (eq_attr "type" "mffgpr")
- (eq_attr "cpu" "power6"))
- "LX2_power6")
-
-(define_bypass 4 "power6-mftgpr" "power6-imul,\
- power6-lmul,\
- power6-imul-cmp,\
- power6-lmul-cmp,\
- power6-imul3,\
- power6-idiv,\
- power6-ldiv" )
@@ -379,13 +379,13 @@ (define_insn_reservation "power8-vecdiv" 31
(eq_attr "cpu" "power8"))
"DU_any_power8,VSU_power8")
-(define_insn_reservation "power8-mffgpr" 5
- (and (eq_attr "type" "mffgpr")
+(define_insn_reservation "power8-mtvsr" 5
+ (and (eq_attr "type" "mtvsr")
(eq_attr "cpu" "power8"))
"DU_any_power8,VSU_power8")
-(define_insn_reservation "power8-mftgpr" 6
- (and (eq_attr "type" "mftgpr")
+(define_insn_reservation "power8-mfvsr" 6
+ (and (eq_attr "type" "mfvsr")
(eq_attr "cpu" "power8"))
"DU_any_power8,VSU_power8")
@@ -466,13 +466,13 @@ (define_insn_reservation "power9-qpmul" 24
(eq_attr "cpu" "power9"))
"DU_super_power9,dfu_power9*8")
-(define_insn_reservation "power9-mffgpr" 2
- (and (eq_attr "type" "mffgpr")
+(define_insn_reservation "power9-mtvsr" 2
+ (and (eq_attr "type" "mtvsr")
(eq_attr "cpu" "power9"))
"DU_slice_3_power9,VSU_power9")
-(define_insn_reservation "power9-mftgpr" 2
- (and (eq_attr "type" "mftgpr")
+(define_insn_reservation "power9-mfvsr" 2
+ (and (eq_attr "type" "mfvsr")
(eq_attr "cpu" "power9"))
"DU_slice_3_power9,VSU_power9")
@@ -17452,14 +17452,6 @@ rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
}
break;
- case TYPE_FPLOAD:
- if ((rs6000_tune == PROCESSOR_POWER6)
- && get_attr_update (insn) == UPDATE_NO
- && recog_memoized (dep_insn)
- && (INSN_CODE (dep_insn) >= 0)
- && (get_attr_type (dep_insn) == TYPE_MFFGPR))
- return 2;
-
default:
break;
}
@@ -17485,11 +17477,6 @@ rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
|| get_attr_type (dep_insn) == TYPE_FPSIMPLE)
return 1;
break;
- case TYPE_FPLOAD:
- if (get_attr_update (insn) == UPDATE_NO
- && get_attr_type (dep_insn) == TYPE_MFFGPR)
- return 2;
- break;
default:
break;
}
@@ -202,7 +202,7 @@ (define_attr "type"
cr_logical,mfcr,mfcrf,mtcr,
fpcompare,fp,fpsimple,dmul,qmul,sdiv,ddiv,ssqrt,dsqrt,
vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,
- vecfloat,vecfdiv,vecdouble,mffgpr,mftgpr,crypto,
+ vecfloat,vecfdiv,vecdouble,mtvsr,mfvsr,crypto,
veclogical,veccmpfx,vecexts,vecmove,
htm,htmsimple,dfp,mma"
(const_string "integer"))
@@ -921,7 +921,7 @@ (define_insn "zero_extendsi<mode>2"
mtvsrwz %x0,%1
mfvsrwz %0,%x1
xxextractuw %x0,%x1,4"
- [(set_attr "type" "load,shift,fpload,fpload,mffgpr,mftgpr,vecexts")
+ [(set_attr "type" "load,shift,fpload,fpload,mtvsr,mfvsr,vecexts")
(set_attr "isa" "*,*,p7,p8v,p8v,p8v,p9v")])
(define_insn_and_split "*zero_extendsi<mode>2_dot"
@@ -1108,7 +1108,7 @@ (define_insn "extendsi<mode>2"
vextsw2d %0,%1
#
#"
-  [(set_attr "type" "load,exts,fpload,fpload,mffgpr,vecexts,vecperm,mftgpr")
+  [(set_attr "type" "load,exts,fpload,fpload,mtvsr,vecexts,vecperm,mfvsr")
(set_attr "sign_extend" "yes")
(set_attr "length" "*,*,*,*,*,*,8,8")
(set_attr "isa" "*,*,p6,p8v,p8v,p9v,p8v,p8v")])
@@ -5048,7 +5048,7 @@ (define_insn_and_split "@signbit<mode>2_dm"
{
operands[2] = gen_highpart (DImode, operands[1]);
}
- [(set_attr "type" "mftgpr,*")])
+ [(set_attr "type" "mfvsr,*")])
;; Optimize IEEE 128-bit signbit on to avoid loading the value into a vector
;; register and then doing a direct move if the value comes from memory.  On
@@ -5402,7 +5402,7 @@ (define_insn "lfiwax"
lxsiwax %x0,%y1
mtvsrwa %x0,%1
vextsw2d %0,%1"
- [(set_attr "type" "fpload,fpload,mffgpr,vecexts")
+ [(set_attr "type" "fpload,fpload,mtvsr,vecexts")
(set_attr "isa" "*,p8v,p8v,p9v")])
; This split must be run before register allocation because it allocates the
@@ -5483,7 +5483,7 @@ (define_insn "lfiwzx"
lxsiwzx %x0,%y1
mtvsrwz %x0,%1
xxextractuw %x0,%x1,4"
- [(set_attr "type" "fpload,fpload,mffgpr,vecexts")
+ [(set_attr "type" "fpload,fpload,mtvsr,vecexts")
(set_attr "isa" "*,p8v,p8v,p9v")])
(define_insn_and_split "floatunssi<mode>2_lfiwzx"
@@ -7087,7 +7087,7 @@ (define_insn "*movsi_internal1"
*, *, *, *,
veclogical, vecsimple, vecsimple, vecsimple,
veclogical, veclogical, vecsimple,
- mffgpr, mftgpr,
+ mtvsr, mfvsr,
*, *, *")
(set_attr "length"
"*, *,
@@ -7173,8 +7173,8 @@ (define_insn_and_split "movsi_from_sf"
}
[(set_attr "type"
"*, load, fpload, fpload, store,
- fpstore, fpstore, fpstore, mftgpr, fp,
- mffgpr")
+ fpstore, fpstore, fpstore, mfvsr, fp,
+ mtvsr")
(set_attr "length"
"*, *, *, *, *,
*, *, *, 8, *,
@@ -7228,7 +7228,7 @@ (define_insn_and_split "*movdi_from_sf_zero_ext"
}
[(set_attr "type"
"*, load, fpload, fpload, two,
- two, mffgpr")
+ two, mtvsr")
(set_attr "length"
"*, *, *, *, 8,
8, *")
@@ -7355,8 +7355,8 @@ (define_insn "*mov<mode>_internal"
nop"
[(set_attr "type"
"*, load, fpload, store, fpstore, *,
- vecsimple, vecperm, vecperm, vecperm, vecperm, mftgpr,
- mffgpr, mfjmpr, mtjmpr, *")
+ vecsimple, vecperm, vecperm, vecperm, vecperm, mfvsr,
+ mtvsr, mfjmpr, mtjmpr, *")
(set_attr "length"
"*, *, *, *, *, *,
*, *, *, *, 8, *,
@@ -7531,7 +7531,7 @@ (define_insn "movsd_hardfloat"
mf%1 %0
nop"
[(set_attr "type"
- "load, fpload, store, fpstore, mffgpr, mftgpr,
+ "load, fpload, store, fpstore, mtvsr, mfvsr,
fpsimple, *, mtjmpr, mfjmpr, *")
(set_attr "isa"
"*, p7, *, *, p8v, p8v,
@@ -7634,7 +7634,7 @@ (define_insn_and_split "movsf_from_si"
*, 12, *, *")
(set_attr "type"
"load, fpload, fpload, fpload, store, fpstore,
- fpstore, vecfloat, mftgpr, *")
+ fpstore, vecfloat, mfvsr, *")
(set_attr "isa"
"*, *, p9v, p8v, *, *,
p8v, p8v, p8v, *")])
@@ -7871,7 +7871,7 @@ (define_insn "*mov<mode>_hardfloat64"
"fpstore, fpload, fpsimple, fpload, fpstore,
fpload, fpstore, veclogical, veclogical, integer,
store, load, *, mtjmpr, mfjmpr,
- *, mftgpr, mffgpr")
+ *, mfvsr, mtvsr")
(set_attr "size" "64")
(set_attr "isa"
"*, *, *, p9v, p9v,
@@ -8711,7 +8711,7 @@ (define_insn "p8_mtvsrwz"
UNSPEC_P8V_MTVSRWZ))]
"!TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrwz %x0,%1"
- [(set_attr "type" "mffgpr")])
+ [(set_attr "type" "mtvsr")])
(define_insn_and_split "reload_fpr_from_gpr<mode>"
[(set (match_operand:FMOVE64X 0 "register_operand" "=d")
@@ -8745,7 +8745,7 @@ (define_insn "p8_mtvsrd_df"
UNSPEC_P8V_MTVSRD))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrd %x0,%1"
- [(set_attr "type" "mftgpr")])
+ [(set_attr "type" "mfvsr")])
(define_insn "p8_xxpermdi_<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=wa")
@@ -8810,7 +8810,7 @@ (define_insn "p8_mtvsrd_sf"
UNSPEC_P8V_MTVSRD))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrd %x0,%1"
- [(set_attr "type" "mffgpr")])
+ [(set_attr "type" "mtvsr")])
(define_insn_and_split "reload_vsx_from_gprsf"
[(set (match_operand:SF 0 "register_operand" "=wa")
@@ -8845,7 +8845,7 @@ (define_insn "p8_mfvsrd_3_<mode>"
UNSPEC_P8V_RELOAD_FROM_VSX))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mfvsrd %0,%x1"
- [(set_attr "type" "mftgpr")])
+ [(set_attr "type" "mfvsr")])
(define_insn_and_split "reload_gpr_from_vsx<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=r")
@@ -9060,7 +9060,7 @@ (define_insn "*movdi_internal64"
vecsimple, vecsimple, vecsimple, veclogical, veclogical,
vecsimple, vecsimple,
mfjmpr, mtjmpr, *,
- mftgpr, mffgpr")
+ mfvsr, mtvsr")
(set_attr "size" "64")
(set_attr "length"
"*, *, *,
@@ -14017,7 +14017,7 @@ (define_insn_and_split "unpack<mode>_dm"
operands[3] = gen_rtx_REG (<FP128_64>mode, fp_regno);
}
- [(set_attr "type" "fp,fpstore,mffgpr,mftgpr,store")])
+ [(set_attr "type" "fp,fpstore,mtvsr,mfvsr,store")])
(define_insn_and_split "unpack<mode>_nodm"
[(set (match_operand:<FP128_64> 0 "nonimmediate_operand" "=d,m")
@@ -1173,7 +1173,7 @@ (define_insn "vsx_mov<mode>_64bit"
return rs6000_output_move_128bit (operands);
}
[(set_attr "type"
-   "vecstore, vecload, vecsimple, mffgpr, mftgpr, load,
+   "vecstore, vecload, vecsimple, mtvsr, mfvsr, load,
store, load, store, *, vecsimple,