@@ -1291,6 +1291,18 @@
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
+;; XTN2: narrow the 128-bit vector in operand 1 and write the result into
+;; one half of operand 0, while the other half is taken unchanged from the
+;; tied input (operand 3, constraint "0") via a vec_select driven by the
+;; high-half lane parallel in operand 2 (vect_par_cnst_hi_half).
+;; NOTE(review): the vec_concat places the truncated lanes first and the
+;; preserved lanes second — confirm this lane ordering is what the RTL
+;; semantics require on both endiannesses; the caller below passes the same
+;; parallel for BYTES_BIG_ENDIAN and little-endian alike.
+(define_insn "aarch64_simd_vec_pack_trunc_hi_<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
+ (vec_select:<VNARROWQ>
+ (match_operand:<VNARROWQ2> 3 "register_operand" "0")
+ (match_operand:<VNARROWQ2> 2 "vect_par_cnst_hi_half" ""))))]
+ "TARGET_SIMD"
+ "xtn2\\t%0.<V2ntype>, %1.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
(define_expand "vec_pack_trunc_<mode>"
[(match_operand:<VNARROWD> 0 "register_operand" "")
(match_operand:VDN 1 "register_operand" "")
@@ -1309,17 +1321,41 @@
;; For quads.
+;; Replace the hard-wired two-instruction output ("xtn ; xtn2") with a
+;; define_insn_and_split: the pattern is kept opaque ("#") and split into a
+;; plain xtn writing the low half of operand 0 followed by the
+;; aarch64_simd_vec_pack_trunc_hi_<mode> xtn2 pattern filling the high half,
+;; so each narrowing step is visible to the RTL passes as a separate insn.
-(define_insn "vec_pack_trunc_<mode>"
+(define_insn_and_split "vec_pack_trunc_<mode>"
 [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=&w")
 (vec_concat:<VNARROWQ2>
 (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
 (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
 "TARGET_SIMD"
+ "#"
+ ""
+ [(const_int 0)]
 {
+ /* The two source operands swap roles between endiannesses, mirroring the
+    old asm strings: big-endian narrowed %2 with xtn and %1 with xtn2,
+    little-endian the reverse.  In both cases the xtn result lands in the
+    low subreg of operand 0 and the xtn2 pattern merges the other source
+    into operand 0's remaining half (operand 0 is passed as the tied input,
+    which is why the "=&w" earlyclobber on the destination is kept).  */
 if (BYTES_BIG_ENDIAN)
- return "xtn\\t%0.<Vntype>, %2.<Vtype>\;xtn2\\t%0.<V2ntype>, %1.<Vtype>";
+ {
+ rtx low_part = gen_lowpart (<VNARROWQ>mode, operands[0]);
+ emit_insn (gen_aarch64_simd_vec_pack_trunc_<mode> (low_part,
+ operands[2]));
+ /* Lane parallel selecting the high half of the narrow-pair mode.  */
+ rtx high_part = aarch64_simd_vect_par_cnst_half (<VNARROWQ2>mode,
+ true);
+ emit_insn (gen_aarch64_simd_vec_pack_trunc_hi_<mode> (operands[0],
+ operands[1],
+ high_part,
+ operands[0]));
+ }
 else
- return "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>";
+ {
+ rtx low_part = gen_lowpart (<VNARROWQ>mode, operands[0]);
+ emit_insn (gen_aarch64_simd_vec_pack_trunc_<mode> (low_part,
+ operands[1]));
+ rtx high_part = aarch64_simd_vect_par_cnst_half (<VNARROWQ2>mode,
+ true);
+ emit_insn (gen_aarch64_simd_vec_pack_trunc_hi_<mode> (operands[0],
+ operands[2],
+ high_part,
+ operands[0]));
+ }
+ /* Entire pattern handled by the emitted insns; no split RTL needed.  */
+ DONE;
 }
 [(set_attr "type" "multiple")
 (set_attr "length" "8")]