@@ -2534,7 +2534,7 @@ memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
unsigned HOST_WIDE_INT uc = sc;
- if (sc < 0 && -uc == (uc & -uc))
+ if (sc < 0 && pow2_or_zerop (-uc))
{
if (xsize > 0)
xsize = -xsize;
@@ -2549,7 +2549,7 @@ memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
unsigned HOST_WIDE_INT uc = sc;
- if (sc < 0 && -uc == (uc & -uc))
+ if (sc < 0 && pow2_or_zerop (-uc))
{
if (ysize > 0)
ysize = -ysize;
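
Both memrefs_conflict_p hunks rewrite the same idiom: for negative sc, the test -uc == (uc & -uc) holds exactly when -uc is zero or a power of two, i.e. when the constant is an alignment-style AND mask such as -16 (and under sc < 0 the zero case cannot actually occur). pow2_or_zerop names that predicate directly. A minimal standalone sketch of the equivalence, with unsigned long long standing in for unsigned HOST_WIDE_INT (an assumption of the sketch, not part of the patch):

#include <assert.h>

/* Mirrors the patch's pow2_or_zerop, on unsigned long long.  */
static int
pow2_or_zerop_ull (unsigned long long x)
{
  return (x & -x) == x;
}

int
main (void)
{
  for (long long sc = -1024; sc < 1024; sc++)
    {
      unsigned long long uc = (unsigned long long) sc;
      int old_test = sc < 0 && -uc == (uc & -uc);
      int new_test = sc < 0 && pow2_or_zerop_ull (-uc);
      assert (old_test == new_test);
    }
  return 0;
}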
@@ -305,7 +305,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
{
ptr_bitmask = TREE_INT_CST_LOW (TREE_OPERAND (addr, 1));
ptr_bitmask *= BITS_PER_UNIT;
- align = ptr_bitmask & -ptr_bitmask;
+ align = least_bit_hwi (ptr_bitmask);
addr = TREE_OPERAND (addr, 0);
}
@@ -325,7 +325,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
unsigned HOST_WIDE_INT step = 1;
if (TMR_STEP (exp))
step = TREE_INT_CST_LOW (TMR_STEP (exp));
- align = MIN (align, (step & -step) * BITS_PER_UNIT);
+ align = MIN (align, least_bit_hwi (step) * BITS_PER_UNIT);
}
if (TMR_INDEX2 (exp))
align = BITS_PER_UNIT;
@@ -404,7 +404,7 @@ get_object_alignment (tree exp)
ptr & (align - 1) == bitpos. */
if (bitpos != 0)
- align = (bitpos & -bitpos);
+ align = least_bit_hwi (bitpos);
return align;
}
@@ -502,7 +502,7 @@ get_pointer_alignment (tree exp)
ptr & (align - 1) == bitpos. */
if (bitpos != 0)
- align = (bitpos & -bitpos);
+ align = least_bit_hwi (bitpos);
return align;
}
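
The get_object_alignment / get_pointer_alignment hunks all exploit the same fact: if a pointer is known to satisfy ptr & (align - 1) == bitpos with bitpos != 0, the strongest alignment that can still be guaranteed is the lowest set bit of bitpos, which is exactly what least_bit_hwi computes. A small standalone illustration (unsigned long long again standing in for unsigned HOST_WIDE_INT; the patched code works in bits, the comments below use bytes for readability):

#include <assert.h>

/* least_bit_hwi: keep only the lowest set bit.  */
static unsigned long long
least_bit_ull (unsigned long long x)
{
  return x & -x;
}

int
main (void)
{
  /* A pointer with ptr % 32 == 24 is 8-byte aligned but not 16-byte
     aligned: 24 = 0b11000, whose lowest set bit is 8.  */
  assert (least_bit_ull (24) == 8);
  /* ptr % 64 == 40 likewise guarantees only 8-byte alignment.  */
  assert (least_bit_ull (40) == 8);
  /* An offset of 0 gives no new information, which is why the patched
     code applies this only when bitpos != 0.  */
  assert (least_bit_ull (0) == 0);
  return 0;
}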
@@ -5565,7 +5565,7 @@ fold_builtin_atomic_always_lock_free (tree arg0, tree arg1)
/* Either this argument is null, or it's a fake pointer encoding
the alignment of the object. */
- val = val & -val;
+ val = least_bit_hwi (val);
val *= BITS_PER_UNIT;
if (val == 0 || mode_align < val)
@@ -1805,7 +1805,7 @@ compute_argument_addresses (struct arg_data *args, rtx argblock, int num_actuals
else if (CONST_INT_P (offset))
{
align = INTVAL (offset) * BITS_PER_UNIT | boundary;
- align = align & -align;
+ align = least_bit_hwi (align);
}
set_mem_align (args[i].stack, align);
@@ -5026,7 +5026,7 @@ store_one_arg (struct arg_data *arg, rtx argblock, int flags,
int pad = used - size;
if (pad)
{
- unsigned int pad_align = (pad & -pad) * BITS_PER_UNIT;
+ unsigned int pad_align = least_bit_hwi (pad) * BITS_PER_UNIT;
parm_align = MIN (parm_align, pad_align);
}
}
@@ -5086,7 +5086,7 @@ store_one_arg (struct arg_data *arg, rtx argblock, int flags,
parm_align = BITS_PER_UNIT;
else if (excess)
{
- unsigned int excess_align = (excess & -excess) * BITS_PER_UNIT;
+ unsigned int excess_align = least_bit_hwi (excess) * BITS_PER_UNIT;
parm_align = MIN (parm_align, excess_align);
}
}
@@ -1009,7 +1009,7 @@ expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
important, we'll simply use the alignment that is already set. */
if (base == virtual_stack_vars_rtx)
offset -= frame_phase;
- align = offset & -offset;
+ align = least_bit_hwi (offset);
align *= BITS_PER_UNIT;
if (align == 0 || align > base_align)
align = base_align;
@@ -8557,7 +8557,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
/* If X is (minus C Y) where C's least set bit is larger than any bit
in the mask, then we may replace with (neg Y). */
if (CONST_INT_P (XEXP (x, 0))
- && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
+ && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
{
x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
GET_MODE (x));
@@ -1843,7 +1843,7 @@ check_bases (tree t,
doesn't define its own, then the current class inherits one. */
if (seen_tm_mask && !find_tm_attribute (TYPE_ATTRIBUTES (t)))
{
- tree tm_attr = tm_mask_to_attr (seen_tm_mask & -seen_tm_mask);
+ tree tm_attr = tm_mask_to_attr (least_bit_hwi (seen_tm_mask));
TYPE_ATTRIBUTES (t) = tree_cons (tm_attr, NULL, TYPE_ATTRIBUTES (t));
}
}
@@ -5074,7 +5074,7 @@ set_one_vmethod_tm_attributes (tree type, tree fndecl)
restrictive one. */
else if (tm_attr == NULL)
{
- apply_tm_attr (fndecl, tm_mask_to_attr (found & -found));
+ apply_tm_attr (fndecl, tm_mask_to_attr (least_bit_hwi (found)));
}
/* Otherwise validate that we're not weaker than a function
that is being overridden. */
@@ -1964,7 +1964,7 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
get_object_alignment_1 (t, &obj_align, &obj_bitpos);
obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1);
if (obj_bitpos != 0)
- obj_align = (obj_bitpos & -obj_bitpos);
+ obj_align = least_bit_hwi (obj_bitpos);
attrs.align = MAX (attrs.align, obj_align);
}
@@ -2298,7 +2298,7 @@ adjust_address_1 (rtx memref, machine_mode mode, HOST_WIDE_INT offset,
if zero. */
if (offset != 0)
{
- max_align = (offset & -offset) * BITS_PER_UNIT;
+ max_align = least_bit_hwi (offset) * BITS_PER_UNIT;
attrs.align = MIN (attrs.align, max_align);
}
@@ -127,10 +127,10 @@ init_expmed_one_conv (struct init_expmed_rtl *all, machine_mode to_mode,
comparison purposes here, reduce the bit size by one in that
case. */
if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
- && exact_log2 (to_size) != -1)
+ && pow2p_hwi (to_size))
to_size --;
if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
- && exact_log2 (from_size) != -1)
+ && pow2p_hwi (from_size))
from_size --;
/* Assume cost of zero-extend and sign-extend is the same. */
@@ -2636,7 +2636,7 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
if ((t & 1) == 0)
{
do_alg_shift:
- m = floor_log2 (t & -t); /* m = number of low zero bits */
+ m = ctz_or_zero (t); /* m = number of low zero bits */
if (m < maxm)
{
q = t >> m;
@@ -2873,9 +2873,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
{
do_alg_add_t2_m:
q = t - 1;
- q = q & -q;
- m = exact_log2 (q);
- if (m >= 0 && m < maxm)
+ m = ctz_hwi (q);
+ if (q && m < maxm)
{
op_cost = shiftadd_cost (speed, mode, m);
new_limit.cost = best_cost.cost - op_cost;
@@ -2897,9 +2896,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
do_alg_sub_t2_m:
q = t + 1;
- q = q & -q;
- m = exact_log2 (q);
- if (m >= 0 && m < maxm)
+ m = ctz_hwi (q);
+ if (q && m < maxm)
{
op_cost = shiftsub0_cost (speed, mode, m);
new_limit.cost = best_cost.cost - op_cost;
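
The two synth_mult hunks are the only ones that restructure control flow rather than substituting a helper one-for-one. The old sequence isolated the low set bit of q and asked exact_log2 for its position, relying on the -1 failure value to filter out q == 0; the new code asks ctz_hwi directly and tests q for zero explicitly. A standalone sketch checking that the two forms agree, assuming GCC-style __builtin_ctzll as the ctz_hwi analogue:

#include <assert.h>

/* Analogue of exact_log2: bit position if X is a power of two, else -1.  */
static int
exact_log2_ull (unsigned long long x)
{
  return (x && (x & (x - 1)) == 0) ? __builtin_ctzll (x) : -1;
}

int
main (void)
{
  const int maxm = 16;
  for (unsigned long long t = 0; t < 4096; t++)
    {
      unsigned long long q = t - 1;
      /* Old form: isolate the low bit, then take its log.  */
      int m_old = exact_log2_ull (q & -q);
      int cond_old = m_old >= 0 && m_old < maxm;
      /* New form: count trailing zeros, guard against q == 0.  */
      int m_new = q ? __builtin_ctzll (q) : -1;
      int cond_new = q != 0 && m_new < maxm;
      assert (cond_old == cond_new && (!cond_old || m_old == m_new));
    }
  return 0;
}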
@@ -4215,7 +4213,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
initial right shift. */
if (mh != 0 && (d & 1) == 0)
{
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
mh = choose_multiplier (d >> pre_shift, size,
size - pre_shift,
&ml, &post_shift, &dummy);
@@ -4873,7 +4871,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
int pre_shift;
rtx t1;
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
ml = invert_mod2n (d >> pre_shift, size);
t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
pre_shift, NULL_RTX, unsignedp);
@@ -10005,7 +10005,7 @@ fold_binary_loc (location_t loc,
mode which allows further optimizations. */
int pop = wi::popcount (warg1);
if (!(pop >= BITS_PER_UNIT
- && exact_log2 (pop) != -1
+ && pow2p_hwi (pop)
&& wi::mask (pop, false, warg1.get_precision ()) == warg1))
return fold_build2_loc (loc, code, type, op0,
wide_int_to_tree (type, masked));
@@ -14251,7 +14251,7 @@ round_up_loc (location_t loc, tree value, unsigned int divisor)
}
/* If divisor is a power of two, simplify this to bit manipulation. */
- if (divisor == (divisor & -divisor))
+ if (pow2_or_zerop (divisor))
{
if (TREE_CODE (value) == INTEGER_CST)
{
@@ -14314,7 +14314,7 @@ round_down_loc (location_t loc, tree value, int divisor)
}
/* If divisor is a power of two, simplify this to bit manipulation. */
- if (divisor == (divisor & -divisor))
+ if (pow2_or_zerop (divisor))
{
tree t;
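
In round_up_loc and round_down_loc only the power-of-two test changes; the bit manipulation the comment refers to is untouched. For a power-of-two divisor d, rounding reduces to masking with -d. A worked sketch of that identity (not part of the patch):

#include <assert.h>

int
main (void)
{
  unsigned long long d = 16;            /* power-of-two divisor */
  /* round_up (v, d) adds d - 1 and masks; round_down (v, d) just masks.  */
  assert (((37 + d - 1) & -d) == 48);
  assert ((37 & -d) == 32);
  /* Note that pow2_or_zerop, like the old divisor == (divisor & -divisor)
     test, also accepts 0; presumably callers pass a nonzero divisor.  */
  return 0;
}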
@@ -2716,7 +2716,7 @@ assign_parm_find_stack_rtl (tree parm, struct assign_parm_data_one *data)
else if (CONST_INT_P (offset_rtx))
{
align = INTVAL (offset_rtx) * BITS_PER_UNIT | boundary;
- align = align & -align;
+ align = least_bit_hwi (align);
}
set_mem_align (stack_parm, align);
@@ -699,7 +699,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
&& !c_strlen (src, 2))
{
unsigned ilen = tree_to_uhwi (len);
- if (exact_log2 (ilen) != -1)
+ if (pow2p_hwi (ilen))
{
tree type = lang_hooks.types.type_for_size (ilen * 8, 1);
if (type
@@ -1874,7 +1874,7 @@ replace_ref (tree *expr, slsr_cand_t c)
requirement for the data type. See PR58041. */
get_object_alignment_1 (*expr, &align, &misalign);
if (misalign != 0)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
if (align < TYPE_ALIGN (acc_type))
acc_type = build_aligned_type (acc_type, align);
@@ -2207,7 +2207,7 @@ gen_hsa_addr_with_align (tree ref, hsa_bb *hbb, BrigAlignment8_t *output_align)
unsigned align = hsa_byte_alignment (addr->m_symbol->m_align);
unsigned misalign = addr->m_imm_offset & (align - 1);
if (misalign)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
*output_align = hsa_alignment_encoding (BITS_PER_UNIT * align);
}
return addr;
@@ -2434,7 +2434,7 @@ hsa_bitmemref_alignment (tree ref)
BrigAlignment8_t base = hsa_object_alignment (ref);
if (byte_bits == 0)
return base;
- return MIN (base, hsa_alignment_encoding (byte_bits & -byte_bits));
+ return MIN (base, hsa_alignment_encoding (least_bit_hwi (byte_bits)));
}
/* Generate HSAIL instructions loading something into register DEST. RHS is
@@ -74,7 +74,7 @@ ceil_log2 (unsigned HOST_WIDE_INT x)
int
exact_log2 (unsigned HOST_WIDE_INT x)
{
- if (x != (x & -x))
+ if (!pow2p_hwi (x))
return -1;
return floor_log2 (x);
}
@@ -85,7 +85,7 @@ exact_log2 (unsigned HOST_WIDE_INT x)
int
ctz_hwi (unsigned HOST_WIDE_INT x)
{
- return x ? floor_log2 (x & -x) : HOST_BITS_PER_WIDE_INT;
+ return x ? floor_log2 (least_bit_hwi (x)) : HOST_BITS_PER_WIDE_INT;
}
/* Similarly for most significant bits. */
@@ -102,7 +102,7 @@ clz_hwi (unsigned HOST_WIDE_INT x)
int
ffs_hwi (unsigned HOST_WIDE_INT x)
{
- return 1 + floor_log2 (x & -x);
+ return 1 + floor_log2 (least_bit_hwi (x));
}
/* Return the number of set bits in X. */
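
These hunks rewrite the out-of-line fallback definitions in terms of the new helper without changing behavior: ffs_hwi (x) is 1 + floor_log2 (least_bit_hwi (x)), and ctz_hwi differs from that only in its x == 0 result. The identities, checked against the GCC builtins in a standalone sketch that assumes a 64-bit unsigned long long:

#include <assert.h>

/* floor_log2 analogue: position of the highest set bit, -1 for 0.  */
static int
floor_log2_ull (unsigned long long x)
{
  return x ? 63 - __builtin_clzll (x) : -1;
}

int
main (void)
{
  for (unsigned long long x = 1; x < 100000; x++)
    {
      unsigned long long lb = x & -x;                         /* least_bit_hwi */
      assert (floor_log2_ull (lb) == __builtin_ctzll (x));    /* ctz_hwi */
      assert (1 + floor_log2_ull (lb) == __builtin_ffsll (x)); /* ffs_hwi */
    }
  return 0;
}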
@@ -219,10 +219,42 @@ ceil_log2 (unsigned HOST_WIDE_INT x)
return floor_log2 (x - 1) + 1;
}
+/* Return X with all but its least-significant set bit masked off.  */
+
+inline unsigned HOST_WIDE_INT
+least_bit_hwi (unsigned HOST_WIDE_INT x)
+{
+ return (x & -x);
+}
+
+/* True if X is zero or a power of two. */
+
+inline bool
+pow2_or_zerop (unsigned HOST_WIDE_INT x)
+{
+ return least_bit_hwi (x) == x;
+}
+
+/* True if X is a power of two. */
+
+inline bool
+pow2p_hwi (unsigned HOST_WIDE_INT x)
+{
+ return x && pow2_or_zerop (x);
+}
+
+/* Like ctz_hwi, except it returns -1, not HOST_BITS_PER_WIDE_INT, when x == 0.  */
+
+inline int
+ctz_or_zero (unsigned HOST_WIDE_INT x)
+{
+ return ffs_hwi (x) - 1;
+}
+
static inline int
exact_log2 (unsigned HOST_WIDE_INT x)
{
- return x == (x & -x) && x ? ctz_hwi (x) : -1;
+ return pow2p_hwi (x) ? ctz_hwi (x) : -1;
}
#endif /* GCC_VERSION >= 3004 */
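
The four new inline helpers and their edge cases, exercised in one place: least_bit_hwi (0) is 0, so pow2_or_zerop accepts 0 while pow2p_hwi rejects it; and since ffs_hwi (0) is 0, ctz_or_zero (0) evaluates to -1, matching the floor_log2 (x & -x) expressions it replaces at the call sites above. A standalone sketch with unsigned long long for unsigned HOST_WIDE_INT and __builtin_ffsll for ffs_hwi (both assumptions of the sketch):

#include <assert.h>

static unsigned long long
least_bit_ull (unsigned long long x)
{
  return x & -x;
}

static int
pow2_or_zerop_ull (unsigned long long x)
{
  return least_bit_ull (x) == x;
}

static int
pow2p_ull (unsigned long long x)
{
  return x && pow2_or_zerop_ull (x);
}

static int
ctz_or_zero_ull (unsigned long long x)
{
  return __builtin_ffsll (x) - 1;
}

int
main (void)
{
  assert (least_bit_ull (0x58) == 0x08);   /* 0b1011000 -> 0b1000 */
  assert (pow2_or_zerop_ull (0) && pow2_or_zerop_ull (64));
  assert (!pow2p_ull (0) && pow2p_ull (64) && !pow2p_ull (96));
  assert (ctz_or_zero_ull (0x58) == 3);    /* three trailing zero bits */
  assert (ctz_or_zero_ull (0) == -1);      /* ffs (0) - 1 */
  return 0;
}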
@@ -929,7 +929,7 @@ ipcp_alignment_lattice::meet_with_1 (unsigned new_align, unsigned new_misalign)
if (misalign != (new_misalign % align))
{
int diff = abs ((int) misalign - (int) (new_misalign % align));
- align = (unsigned) diff & -diff;
+ align = least_bit_hwi (diff);
if (align)
misalign = misalign % align;
else
@@ -4170,7 +4170,7 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt,
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
if (align < TYPE_ALIGN (type))
type = build_aligned_type (type, align);
base = force_gimple_operand_gsi (&gsi, base,
@@ -19452,7 +19452,7 @@ oacc_loop_fixed_partitions (oacc_loop *loop, unsigned outer_mask)
}
else
{
- unsigned outermost = this_mask & -this_mask;
+ unsigned outermost = least_bit_hwi (this_mask);
if (outermost && outermost <= outer_mask)
{
@@ -19533,7 +19533,7 @@ oacc_loop_auto_partitions (oacc_loop *loop, unsigned outer_mask)
/* Determine the outermost partitioning used within this loop. */
this_mask = loop->inner | GOMP_DIM_MASK (GOMP_DIM_MAX);
- this_mask = (this_mask & -this_mask);
+ this_mask = least_bit_hwi (this_mask);
/* Pick the partitioning just inside that one. */
this_mask >>= 1;
@@ -4511,8 +4511,8 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
int width0 = floor_log2 (nz0) + 1;
int width1 = floor_log2 (nz1) + 1;
- int low0 = floor_log2 (nz0 & -nz0);
- int low1 = floor_log2 (nz1 & -nz1);
+ int low0 = ctz_or_zero (nz0);
+ int low1 = ctz_or_zero (nz1);
unsigned HOST_WIDE_INT op0_maybe_minusp
= nz0 & (HOST_WIDE_INT_1U << sign_index);
unsigned HOST_WIDE_INT op1_maybe_minusp
@@ -1169,14 +1169,12 @@ place_field (record_layout_info rli, tree field)
/* Work out the known alignment so far. Note that A & (-A) is the
value of the least-significant bit in A that is one. */
if (! integer_zerop (rli->bitpos))
- known_align = (tree_to_uhwi (rli->bitpos)
- & - tree_to_uhwi (rli->bitpos));
+ known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
else if (integer_zerop (rli->offset))
known_align = 0;
else if (tree_fits_uhwi_p (rli->offset))
known_align = (BITS_PER_UNIT
- * (tree_to_uhwi (rli->offset)
- & - tree_to_uhwi (rli->offset)));
+ * least_bit_hwi (tree_to_uhwi (rli->offset)));
else
known_align = rli->offset_align;
@@ -1479,14 +1477,12 @@ place_field (record_layout_info rli, tree field)
approximate this by seeing if its position changed), lay out the field
again; perhaps we can use an integral mode for it now. */
if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
- actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
- & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
+ actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
else if (integer_zerop (DECL_FIELD_OFFSET (field)))
actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
actual_align = (BITS_PER_UNIT
- * (tree_to_uhwi (DECL_FIELD_OFFSET (field))
- & - tree_to_uhwi (DECL_FIELD_OFFSET (field))));
+ * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
else
actual_align = DECL_OFFSET_ALIGN (field);
/* ACTUAL_ALIGN is still the actual alignment *within the record* .
@@ -1353,7 +1353,7 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, int flags,
? "unsigned long long"
: "signed long long"));
else if (TYPE_PRECISION (node) >= CHAR_TYPE_SIZE
- && exact_log2 (TYPE_PRECISION (node)) != -1)
+ && pow2p_hwi (TYPE_PRECISION (node)))
{
pp_string (pp, (TYPE_UNSIGNED (node) ? "uint" : "int"));
pp_decimal_int (pp, TYPE_PRECISION (node));
@@ -1680,7 +1680,7 @@ build_ref_for_offset (location_t loc, tree base, HOST_WIDE_INT offset,
misalign = (misalign + offset) & (align - 1);
if (misalign != 0)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
if (align != TYPE_ALIGN (exp_type))
exp_type = build_aligned_type (exp_type, align);
@@ -929,7 +929,7 @@ ccp_finalize (bool nonzero_p)
/* Trailing mask bits specify the alignment, trailing value
bits the misalignment. */
tem = val->mask.to_uhwi ();
- align = (tem & -tem);
+ align = least_bit_hwi (tem);
if (align > 1)
set_ptr_info_alignment (get_ptr_info (name), align,
(TREE_INT_CST_LOW (val->value)
@@ -2647,7 +2647,7 @@ bswap_replace (gimple *cur_stmt, gimple *src_stmt, tree fndecl,
unsigned HOST_WIDE_INT l
= (load_offset * BITS_PER_UNIT) & (align - 1);
if (l)
- align = l & -l;
+ align = least_bit_hwi (l);
}
}
@@ -1983,7 +1983,7 @@ handle_builtin_memcmp (gimple_stmt_iterator *gsi)
if (tree_fits_uhwi_p (len)
&& (leni = tree_to_uhwi (len)) <= GET_MODE_SIZE (word_mode)
- && exact_log2 (leni) != -1)
+ && pow2p_hwi (leni))
{
leni *= CHAR_TYPE_SIZE;
unsigned align1 = get_pointer_alignment (arg1);
@@ -2241,7 +2241,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
if (DR_IS_READ (dr)
&& (dr_step % type_size) == 0
&& groupsize > 0
- && exact_log2 (groupsize) != -1)
+ && pow2p_hwi (groupsize))
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
@@ -4736,7 +4736,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (count) != -1);
+ gcc_assert (pow2p_hwi (count));
for (i = 0; i < nelt / 2; i++)
{
@@ -4914,7 +4914,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (length) != -1);
+ gcc_assert (pow2p_hwi (length));
for (i = 0, n = nelt / 2; i < n; i++)
{
@@ -5309,7 +5309,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p,
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (count) != -1);
+ gcc_assert (pow2p_hwi (count));
for (i = 0; i < nelt; i++)
sel[i] = i * 2;
if (can_vec_perm_p (mode, false, sel))
@@ -5483,7 +5483,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (length) != -1);
+ gcc_assert (pow2p_hwi (length));
for (i = 0; i < nelt; ++i)
sel[i] = i * 2;
@@ -5632,7 +5632,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
memcpy (result_chain->address (), dr_chain.address (),
length * sizeof (tree));
- if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
+ if (pow2p_hwi (length) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
{
unsigned int j, log_length = exact_log2 (length);
for (i = 0; i < nelt / 2; ++i)
@@ -5880,7 +5880,7 @@ vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
get chain for loads group using vect_shift_permute_load_chain. */
mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
- || exact_log2 (size) != -1
+ || pow2p_hwi (size)
|| !vect_shift_permute_load_chain (dr_chain, size, stmt,
gsi, &result_chain))
vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
@@ -494,7 +494,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
|| (!has_vector_shift && pre_shift != -1))
{
if (has_vector_shift)
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
else if (pre_shift == -1)
{
unsigned int j;
@@ -2736,7 +2736,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
for even divisors, using an initial right shift. */
if (mh != 0 && (d & 1) == 0)
{
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
mh = choose_multiplier (d >> pre_shift, prec, prec - pre_shift,
&ml, &post_shift, &dummy_int);
gcc_assert (!mh);
@@ -2340,7 +2340,7 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
misalign);
tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
- misalign ? misalign & -misalign : align);
+ misalign ? least_bit_hwi (misalign) : align);
new_stmt
= gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
ptr, vec_mask, vec_rhs);
@@ -2390,7 +2390,7 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
misalign);
tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
- misalign ? misalign & -misalign : align);
+ misalign ? least_bit_hwi (misalign) : align);
new_stmt
= gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
ptr, vec_mask);
@@ -174,7 +174,7 @@ instrument_expr (gimple_stmt_iterator gsi, tree expr, bool is_write)
if ((align - 1) & bitpos)
{
align = (align - 1) & bitpos;
- align = align & -align;
+ align = least_bit_hwi (align);
}
expr = build_fold_addr_expr (unshare_expr (base));
expr = build2 (MEM_REF, char_type_node, expr,
@@ -1983,7 +1983,7 @@ static bool
negative_power_of_two_p (HOST_WIDE_INT i)
{
unsigned HOST_WIDE_INT x = -(unsigned HOST_WIDE_INT)i;
- return x == (x & -x);
+ return pow2_or_zerop (x);
}
/* Strip constant offsets and alignments off of LOC. Return the base
@@ -2632,7 +2632,7 @@ assemble_trampoline_template (void)
static inline unsigned
min_align (unsigned int a, unsigned int b)
{
- return (a | b) & -(a | b);
+ return least_bit_hwi (a | b);
}
/* Return the assembler directive for creating a given kind of integer
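
Finally, min_align is the same least-bit computation applied to a | b: the lowest set bit of a | b is the largest power of two dividing both a and b, which for two power-of-two alignments is simply the smaller one. A short standalone check:

#include <assert.h>

static unsigned
min_align (unsigned a, unsigned b)
{
  return (a | b) & -(a | b);   /* = least_bit_hwi (a | b) */
}

int
main (void)
{
  assert (min_align (16, 4) == 4);   /* min of two powers of two */
  assert (min_align (8, 8) == 8);
  /* For arbitrary operands it is the largest power of two dividing both.  */
  assert (min_align (24, 32) == 8);
  return 0;
}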