@@ -313,7 +313,7 @@ verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
tree lhs_type;
- lhs_type = gimple_expr_type (stmt);
+ lhs_type = TREE_TYPE (gimple_get_lhs (stmt));
if (TREE_CODE (lhs_type) != INTEGER_TYPE
&& TREE_CODE (lhs_type) != ENUMERAL_TYPE)
@@ -702,7 +702,7 @@ find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
int i, type_size, old_type_size;
tree type;
- type = gimple_expr_type (stmt);
+ type = TREE_TYPE (gimple_assign_lhs (stmt));
type_size = TYPE_PRECISION (type);
if (type_size % BITS_PER_UNIT != 0)
return NULL;
@@ -851,7 +851,7 @@ find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
- tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
+ tree type_size = TYPE_SIZE_UNIT (TREE_TYPE (gimple_get_lhs (stmt)));
if (!tree_fits_uhwi_p (type_size))
return NULL;
@@ -116,11 +116,8 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
types. */
tree
-vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
- HOST_WIDE_INT *lhs_size_unit,
- HOST_WIDE_INT *rhs_size_unit)
+vect_get_smallest_scalar_type (stmt_vec_info stmt_info, tree scalar_type)
{
- tree scalar_type = gimple_expr_type (stmt_info->stmt);
HOST_WIDE_INT lhs, rhs;
/* During the analysis phase, this function is called on arbitrary
@@ -131,21 +128,24 @@ vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
- if (assign
- && (gimple_assign_cast_p (assign)
+ if (assign)
+ {
+ scalar_type = TREE_TYPE (gimple_assign_lhs (assign));
+ if (gimple_assign_cast_p (assign)
|| gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_PLUS_EXPR
|| gimple_assign_rhs_code (assign) == WIDEN_MINUS_EXPR
- || gimple_assign_rhs_code (assign) == FLOAT_EXPR))
- {
- tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
+ || gimple_assign_rhs_code (assign) == FLOAT_EXPR)
+ {
+ tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
- rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
- if (rhs < lhs)
- scalar_type = rhs_type;
+ rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
+ if (rhs < lhs)
+ scalar_type = rhs_type;
+ }
}
else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
{
@@ -153,10 +153,16 @@ vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
if (gimple_call_internal_p (call))
{
internal_fn ifn = gimple_call_internal_fn (call);
- if (internal_load_fn_p (ifn) || internal_store_fn_p (ifn))
- /* gimple_expr_type already picked the type of the loaded
- or stored data. */
+ if (internal_load_fn_p (ifn))
+ /* For loads the LHS type does the trick. */
i = ~0U;
+ else if (internal_store_fn_p (ifn))
+ {
+ /* For stores use the type of the stored value. */
+ i = internal_fn_stored_value_index (ifn);
+ scalar_type = TREE_TYPE (gimple_call_arg (call, i));
+ i = ~0U;
+ }
else if (internal_fn_mask_index (ifn) == 0)
i = 1;
}
@@ -172,8 +178,6 @@ vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
}
}
- *lhs_size_unit = lhs;
- *rhs_size_unit = rhs;
return scalar_type;
}
@@ -1017,7 +1017,7 @@ static bool
expand_vector_condition (gimple_stmt_iterator *gsi, bitmap dce_ssa_names)
{
gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
- tree type = gimple_expr_type (stmt);
+ tree type = TREE_TYPE (gimple_assign_lhs (stmt));
tree a = gimple_assign_rhs1 (stmt);
tree a1 = a;
tree a2 = NULL_TREE;
@@ -1745,11 +1745,11 @@ static void
expand_vector_scalar_condition (gimple_stmt_iterator *gsi)
{
gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
- tree type = gimple_expr_type (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree type = TREE_TYPE (lhs);
tree compute_type = get_compute_type (COND_EXPR, mov_optab, type);
machine_mode compute_mode = TYPE_MODE (compute_type);
gcc_assert (compute_mode != BLKmode);
- tree lhs = gimple_assign_lhs (stmt);
tree rhs2 = gimple_assign_rhs2 (stmt);
tree rhs3 = gimple_assign_rhs3 (stmt);
tree new_rhs;
@@ -2129,10 +2129,10 @@ expand_vector_operations_1 (gimple_stmt_iterator *gsi,
return;
rhs1 = gimple_assign_rhs1 (stmt);
- type = gimple_expr_type (stmt);
if (rhs_class == GIMPLE_BINARY_RHS)
rhs2 = gimple_assign_rhs2 (stmt);
+ type = TREE_TYPE (lhs);
if (!VECTOR_TYPE_P (type)
|| !VECTOR_TYPE_P (TREE_TYPE (rhs1)))
return;
@@ -564,7 +564,7 @@ vect_widened_op_tree (vec_info *vinfo, stmt_vec_info stmt_info, tree_code code,
if (rhs_code != code && rhs_code != widened_code)
return 0;
- tree type = gimple_expr_type (assign);
+ tree type = TREE_TYPE (gimple_assign_lhs (assign));
if (!INTEGRAL_TYPE_P (type))
return 0;
@@ -1006,7 +1006,7 @@ vect_recog_dot_prod_pattern (vec_info *vinfo,
&oprnd0, &oprnd1))
return NULL;
- type = gimple_expr_type (last_stmt);
+ type = TREE_TYPE (gimple_get_lhs (last_stmt));
vect_unpromoted_value unprom_mult;
oprnd0 = vect_look_through_possible_promotion (vinfo, oprnd0, &unprom_mult);
@@ -1135,7 +1135,7 @@ vect_recog_sad_pattern (vec_info *vinfo,
&plus_oprnd0, &plus_oprnd1))
return NULL;
- tree sum_type = gimple_expr_type (last_stmt);
+ tree sum_type = TREE_TYPE (gimple_get_lhs (last_stmt));
/* Any non-truncating sequence of conversions is OK here, since
with a successful match, the result of the ABS(U) is known to fit
@@ -1258,7 +1258,7 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
/* Pattern detected. */
vect_pattern_detected (name, last_stmt);
- tree type = gimple_expr_type (last_stmt);
+ tree type = TREE_TYPE (gimple_get_lhs (last_stmt));
tree itype = type;
if (TYPE_PRECISION (type) != TYPE_PRECISION (half_type) * 2
|| TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type))
@@ -1653,7 +1653,7 @@ vect_recog_widen_sum_pattern (vec_info *vinfo,
&oprnd0, &oprnd1))
return NULL;
- type = gimple_expr_type (last_stmt);
+ type = TREE_TYPE (gimple_get_lhs (last_stmt));
/* So far so good. Since last_stmt was detected as a (summation) reduction,
we know that oprnd1 is the reduction variable (defined by a loop-header
@@ -3716,7 +3716,7 @@ vect_recog_mixed_size_cond_pattern (vec_info *vinfo,
if (comp_vectype == NULL_TREE)
return NULL;
- type = gimple_expr_type (last_stmt);
+ type = TREE_TYPE (gimple_assign_lhs (last_stmt));
if (types_compatible_p (type, comp_scalar_type)
|| ((TREE_CODE (then_clause) != INTEGER_CST
|| TREE_CODE (else_clause) != INTEGER_CST)
@@ -10802,8 +10802,6 @@ vect_analyze_stmt (vec_info *vinfo,
if (STMT_VINFO_RELEVANT_P (stmt_info))
{
- tree type = gimple_expr_type (stmt_info->stmt);
- gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
|| (call && gimple_call_lhs (call) == NULL_TREE));
@@ -12076,11 +12074,6 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
"not vectorized: irregular stmt.%G", stmt);
}
- if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
- return opt_result::failure_at (stmt,
- "not vectorized: vector stmt in loop:%G",
- stmt);
-
tree vectype;
tree scalar_type = NULL_TREE;
if (group_size == 0 && STMT_VINFO_VECTYPE (stmt_info))
@@ -12130,6 +12123,12 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
}
+
+ if (scalar_type && VECTOR_MODE_P (TYPE_MODE (scalar_type)))
+ return opt_result::failure_at (stmt,
+ "not vectorized: vector stmt in loop:%G",
+ stmt);
+
*stmt_vectype_out = vectype;
/* Don't try to compute scalar types if the stmt produces a boolean
@@ -12140,8 +12139,8 @@ vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
/* The number of units is set according to the smallest scalar
type (or the largest vector size, but we only support one
vector size per vectorization). */
- HOST_WIDE_INT dummy;
- scalar_type = vect_get_smallest_scalar_type (stmt_info, &dummy, &dummy);
+ scalar_type = vect_get_smallest_scalar_type (stmt_info,
+ TREE_TYPE (vectype));
if (scalar_type != TREE_TYPE (vectype))
{
if (dump_enabled_p ())
@@ -1960,8 +1960,7 @@ extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0);
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
(vec_info *, dr_vec_info *, bool);
-extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
- HOST_WIDE_INT *);
+extern tree vect_get_smallest_scalar_type (stmt_vec_info, tree);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);