@@ -8227,12 +8227,20 @@ make_compound_operation_int (scalar_int_
int sub_width;
if ((REG_P (sub) || MEM_P (sub))
&& GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
- && sub_width < mode_width)
+ && sub_width < mode_width
+ && (!WORD_REGISTER_OPERATIONS
+ || sub_width >= BITS_PER_WORD
+ /* On WORD_REGISTER_OPERATIONS targets the bits
+ beyond sub_mode aren't considered undefined,
+ so optimize only if it is a MEM load and MEM loads
+ zero extend, because then the upper bits are all zero. */
+ || (MEM_P (sub)
+ && load_extend_op (sub_mode) == ZERO_EXTEND)))
{
unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
unsigned HOST_WIDE_INT mask;
- /* original AND constant with all the known zero bits set */
+ /* Original AND constant with all the known zero bits set. */
mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
if ((mask & mode_mask) == mode_mask)
{
@@ -0,0 +1,15 @@
+/* PR rtl-optimization/112758 */
+
+int a = -__INT_MAX__ - 1;
+
+int
+main ()
+{
+ if (-__INT_MAX__ - 1U == 0x80000000ULL)
+ {
+ unsigned long long b = 0xffff00ffffffffffULL;
+ if ((b & a) != 0xffff00ff80000000ULL)
+ __builtin_abort ();
+ }
+ return 0;
+}