===================================================================
@@ -134,6 +134,7 @@ along with GCC; see the file COPYING3.
#include "dbgcnt.h"
#include "gimple-fold.h"
#include "params.h"
+#include "expr.h"
/* Possible lattice values. */
@@ -169,6 +170,7 @@ static prop_value_t *const_val;
static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
+static prop_value_t get_value_from_alignment (tree);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
@@ -391,6 +393,48 @@ canonicalize_float_value (prop_value_t *
}
}
+static bool
+alignment_to_address_transition_p (prop_value_t old_val, prop_value_t new_val,
+ bool warn)
+{
+ prop_value_t align_val;
+ double_int align_offset;
+ bool compat;
+
+ if (!(old_val.lattice_val == CONSTANT
+ && integer_zerop (old_val.value)
+ && new_val.lattice_val == CONSTANT
+ && TREE_CODE (new_val.value) == ADDR_EXPR))
+ return false;
+
+  align_val = get_value_from_alignment (new_val.value);
+  gcc_assert (align_val.lattice_val == CONSTANT);
+  align_offset = tree_to_double_int (align_val.value);
+
+ /* There are 3 possibilities here:
+ - alignment ADDR_EXPR matches alignment old_val. We gain information in
+ the higher bits, and keep the same information in the align bits.
+ - alignment ADDR_EXPR is smaller than alignment old_val. We gain
+ information in the higher bits, but lose information in the align bits.
+ - alignment ADDR_EXPR is greater than alignment old_val. We gain
+ information both in the higher bits and the align bits.
+     Furthermore, we've detected a misaligned pointer dereference.  */
+
+ if (warn)
+ {
+ compat = (double_int_zero_p (double_int_and_not (align_offset,
+ old_val.mask))
+ && double_int_equal_p (old_val.mask,
+ double_int_ior (align_val.mask,
+ old_val.mask)));
+ if (!compat)
+ warning (OPT_Wunaligned_pointer_deref,
+ "misaligned pointer dereferenced");
+ }
+
+ return true;
+}
+
/* Return whether the lattice transition is valid. */
static bool
@@ -414,6 +458,10 @@ valid_lattice_transition (prop_value_t o
&& TREE_CODE (new_val.value) == INTEGER_CST)
return true;
+ /* Allow transitioning from ~3 to &x. */
+ if (alignment_to_address_transition_p (old_val, new_val, false))
+ return true;
+
/* Bit-lattices have to agree in the still valid bits. */
if (TREE_CODE (old_val.value) == INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
@@ -438,6 +486,10 @@ set_lattice_value (tree var, prop_value_
canonicalize_float_value (&new_val);
+ if (old_val->lattice_val == CONSTANT
+ && new_val.lattice_val < CONSTANT)
+ return false;
+
/* We have to be careful to not go up the bitwise lattice
represented by the mask.
??? This doesn't seem to be the best place to enforce this. */
@@ -461,7 +513,8 @@ set_lattice_value (tree var, prop_value_
|| (new_val.lattice_val == CONSTANT
&& TREE_CODE (new_val.value) == INTEGER_CST
&& (TREE_CODE (old_val->value) != INTEGER_CST
- || !double_int_equal_p (new_val.mask, old_val->mask))))
+ || !double_int_equal_p (new_val.mask, old_val->mask)))
+ || alignment_to_address_transition_p (*old_val, new_val, true))
{
/* ??? We would like to delay creation of INTEGER_CSTs from
partially constants here. */
@@ -500,20 +553,14 @@ value_to_double_int (prop_value_t val)
return double_int_zero;
}
-/* Return the value for the address expression EXPR based on alignment
- information. */
+/* Return the value for an expr of type TYPE with alignment ALIGN and offset
+ BITPOS relative to the alignment. */
static prop_value_t
-get_value_from_alignment (tree expr)
+get_align_value (unsigned int align, tree type, unsigned HOST_WIDE_INT bitpos)
{
- tree type = TREE_TYPE (expr);
prop_value_t val;
- unsigned HOST_WIDE_INT bitpos;
- unsigned int align;
-
- gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
- align = get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitpos);
val.mask
= double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
? double_int_mask (TYPE_PRECISION (type))
@@ -529,6 +576,21 @@ get_value_from_alignment (tree expr)
return val;
}
+/* Return the value for the address expression EXPR based on alignment
+ information. */
+
+static prop_value_t
+get_value_from_alignment (tree expr)
+{
+ unsigned int align;
+ unsigned HOST_WIDE_INT bitpos;
+
+ gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
+
+ align = get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitpos);
+ return get_align_value (align, TREE_TYPE (expr), bitpos);
+}
+
/* Return the value for the tree operand EXPR. If FOR_BITS_P is true
return constant bits extracted from alignment information for
invariant addresses. */
@@ -707,25 +769,131 @@ surely_varying_stmt_p (gimple stmt)
return false;
}
+/* Find pointer dereference and init lattice value for that pointer based on
+ alignment, and propagate backward through pointer arithmetic. */
+
+static void
+deduce_alignment_from_dereference (gimple stmt)
+{
+ gimple def;
+ unsigned int align, misalign = 0;
+ tree memref, ptr, offset;
+ HOST_WIDE_INT offset_val;
+ struct ptr_info_def *pi;
+ prop_value_t val;
+
+ if (!is_gimple_assign (stmt))
+ return;
+
+ if (gimple_assign_rhs_code (stmt) == MEM_REF)
+ {
+ memref = gimple_assign_rhs1 (stmt);
+
+ align = get_object_or_type_alignment (memref) / BITS_PER_UNIT;
+ if (align == 1)
+ return;
+
+ offset = TREE_OPERAND (memref, 1);
+ if (!host_integerp (offset, 0))
+ return;
+ offset_val = tree_low_cst (offset, 0);
+ offset_val = offset_val % align;
+ misalign = (align + misalign - offset_val) % align;
+
+ ptr = TREE_OPERAND (memref, 0);
+ }
+ else
+ /* Todo: handle more cases. */
+ return;
+
+ while (true)
+ {
+
+ if (TREE_CODE (ptr) == INTEGER_CST)
+ {
+ if (host_integerp (ptr, 0)
+ && tree_low_cst (ptr, 0) % align != misalign)
+ warning (OPT_Wunaligned_pointer_deref,
+ "misaligned constant pointer dereferenced");
+ return;
+ }
+
+ if (TREE_CODE (ptr) != SSA_NAME)
+ return;
+
+ pi = get_ptr_info (ptr);
+
+ if ((pi->align < align && misalign % pi->align != pi->misalign)
+ || (pi->align >= align && pi->misalign % align != misalign))
+ {
+ warning (OPT_Wunaligned_pointer_deref,
+ "misaligned pointer dereferenced");
+ return;
+ }
+
+ if (pi->align >= align)
+ return;
+
+ val = get_align_value (align * BITS_PER_UNIT, TREE_TYPE (ptr),
+ misalign * BITS_PER_UNIT);
+ const_val[SSA_NAME_VERSION (ptr)] = val;
+
+ if (SSA_NAME_IS_DEFAULT_DEF (ptr))
+ return;
+
+ /* Propagate backwards over pointer arithmetic. */
+ def = SSA_NAME_DEF_STMT (ptr);
+ if (!is_gimple_assign (def))
+ return;
+
+ switch (gimple_assign_rhs_code (def))
+ {
+ case POINTER_PLUS_EXPR:
+ offset = gimple_assign_rhs2 (def);
+ if (!host_integerp (offset, 0))
+ return;
+ offset_val = tree_low_cst (offset, 0);
+ offset_val = offset_val % align;
+ misalign = (align + misalign - offset_val) % align;
+ ptr = gimple_assign_rhs1 (def);
+ break;
+ case INTEGER_CST:
+ case SSA_NAME:
+ ptr = gimple_assign_rhs1 (def);
+ break;
+ default:
+ /* Todo: handle more cases. */
+ return;
+ }
+ }
+}
+
/* Initialize local data structures for CCP. */
static void
ccp_initialize (void)
{
- basic_block bb;
+ basic_block bb, entry;
const_val = XCNEWVEC (prop_value_t, num_ssa_names);
+ /* Needs to be the successor of entry, for CDI_POST_DOMINATORS. */
+ entry = single_succ (ENTRY_BLOCK_PTR);
+
/* Initialize simulation flags for PHI nodes and statements. */
FOR_EACH_BB (bb)
{
gimple_stmt_iterator i;
+ bool post_dom_by_entry = dominated_by_p (CDI_POST_DOMINATORS, entry, bb);
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
gimple stmt = gsi_stmt (i);
bool is_varying;
+ if (post_dom_by_entry)
+ deduce_alignment_from_dereference (stmt);
+
/* If the statement is a control insn, then we do not
want to avoid simulating the statement once. Failure
to do so means that those edges will never get added. */
@@ -2024,7 +2192,9 @@ ccp_visit_stmt (gimple stmt, edge *taken
static unsigned int
do_ssa_ccp (void)
{
+ calculate_dominance_info (CDI_POST_DOMINATORS);
ccp_initialize ();
+ free_dominance_info (CDI_POST_DOMINATORS);
ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
if (ccp_finalize ())
return (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
===================================================================
@@ -4553,7 +4553,7 @@ get_bit_range (unsigned HOST_WIDE_INT *b
their type, so we optimistically fall back to the alignment of the
type when we cannot compute a misalignment. */
-static unsigned int
+unsigned int
get_object_or_type_alignment (tree exp)
{
unsigned HOST_WIDE_INT misalign;
===================================================================
@@ -397,6 +397,10 @@ extern rtx push_block (rtx, int, int);
extern void emit_push_insn (rtx, enum machine_mode, tree, rtx, unsigned int,
int, rtx, int, rtx, rtx, int, rtx);
+/* Return the alignment of the object EXP, also considering its type
+ when we do not know of explicit misalignment. */
+extern unsigned int get_object_or_type_alignment (tree);
+
/* Expand an assignment that stores the value of FROM into TO. */
extern void expand_assignment (tree, tree, bool);
===================================================================
@@ -511,6 +511,10 @@ Wcast-align
Common Var(warn_cast_align) Warning
Warn about pointer casts which increase alignment
+Wunaligned-pointer-deref
+Common Var(warn_unaligned_pointer_deref) Warning
+Warn about unaligned pointers which are unconditionally dereferenced
+
Wcpp
Common Var(warn_cpp) Init(1) Warning
Warn when a #warning directive is encountered
===================================================================
@@ -0,0 +1,33 @@
+/* { dg-do compile { target arm*-*-* } } */
+/* { dg-options "-Os -finline-functions -mno-unaligned-access -fdump-rtl-expand" } */
+
+typedef unsigned int size_t;
+extern void* memcpy (void *, const void *, size_t);
+
+typedef union JValue {
+ void* l;
+} JValue;
+typedef struct Object {
+ int x;
+} Object;
+
+extern __inline__ long long
+dvmGetArgLong (const unsigned int* args, int elem)
+{
+ long long val;
+ memcpy (&val, &args[elem], 8);
+ return val;
+}
+
+void
+Dalvik_sun_misc_Unsafe_getObject (const unsigned int* args, JValue* pResult)
+{
+ Object* obj = (Object*) args[1];
+ long long offset = dvmGetArgLong (args, 2);
+ Object** address = (Object**) (((unsigned char*) obj) + offset);
+ pResult->l = ((void*) *address);
+}
+
+/* { dg-final { scan-rtl-dump-times "memcpy" 0 "expand" } } */
+/* { dg-final { cleanup-rtl-dump "expand" } } */
+
===================================================================
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -Wunaligned-pointer-deref" } */
+
+int *y;
+
+int
+f()
+{
+ int *p = (int*)0x10000001;
+ y = p;
+ return *p;
+}
+
+/* { dg-warning "misaligned constant pointer dereferenced" "" { target *-*-* } 7 } */
+/* { dg-warning "misaligned constant pointer dereferenced" "" { target *-*-* } 12 } */
===================================================================
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -Wunaligned-pointer-deref" } */
+
+short
+foo (void)
+{
+ int a = 0;
+ short *b = (short *) (((char *)&a) + 1);
+ return *b;
+}
+
+/* { dg-warning "misaligned pointer dereferenced" "" { target *-*-* } 10 } */
+
===================================================================
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-ccp1" } */
+
+int y;
+
+unsigned long int
+f (int *p)
+{
+ y = *p;
+ return ((unsigned long int)p) & (sizeof (*p) - 1);
+}
+
+/* { dg-final { scan-tree-dump-times "return 0" 1 "ccp1"} } */
+/* { dg-final { cleanup-tree-dump "ccp1" } } */