VRP wrapping MULT_EXPR

Submitter Marc Glisse
Date Aug. 2, 2012, 9:49 p.m.
Message ID <alpine.DEB.2.02.1208022331010.19386@stedding.saclay.inria.fr>
Permalink /patch/174815/
State New

Comments

Marc Glisse - Aug. 2, 2012, 9:49 p.m.
Hello,

here is a patch handling multiplication of wrapping integer types in VRP.
It passed bootstrap+testsuite limited to c,c++ and without the testcase,
so I'll retest it more thoroughly during the night. For some reason the
test libgomp.graphite/force-parallel-6.c, which used to fail, now passes
with the patch.
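
To make this concrete, here is a small standalone illustration (not
part of the patch, and using 8-bit arithmetic for readability): with a
wrapping unsigned type, the interval [253,255] is really [-3,-1] modulo
256, so every product of two values from it lands in [1,9], and VRP no
longer needs to drop to varying.

  /* Illustration only: all products of two values in [253,255] are
     in [1,9] modulo 256, because [253,255] is [-3,-1] in disguise.  */
  #include <stdio.h>

  int main (void)
  {
    unsigned a, b;
    for (a = 253; a <= 255; a++)
      for (b = 253; b <= 255; b++)
	printf ("%u * %u = %u (mod 256)\n", a, b,
		(unsigned) (unsigned char) (a * b));
    return 0;
  }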

gcc/
2012-08-03 Marc Glisse <marc.glisse@inria.fr>

 	PR tree-optimization/30318
 	* double-int.c (mul_double_wide_with_sign): New function.
 	(mul_double_with_sign): Call the new function.
 	* double-int.h (mul_double_wide_with_sign): Declare the new function.
 	* tree-vrp.c (extract_range_from_binary_expr_1) [MULT_EXPR]:
 	Handle integer types that wrap on overflow.
 	(quad_int_cmp): New helper function.
 	(quad_int_pair_sort): Likewise.

gcc/testsuite/
2012-08-03 Marc Glisse <marc.glisse@inria.fr>

 	PR tree-optimization/30318
 	* gcc.dg/tree-ssa/vrp77.c: New testcase.
Richard Guenther - Aug. 3, 2012, 8 a.m.
On Thu, Aug 2, 2012 at 11:49 PM, Marc Glisse <marc.glisse@inria.fr> wrote:
> Hello,
>
> here is a patch handling multiplication of wrapping integer types in VRP. It
> passed bootstrap+testsuite limited to c,c++ and without the testcase, so
> I'll retest it better during the night. For some reason the test
> libgomp.graphite/force-parallel-6.c which used to fail passed with the
> patch.
>
> gcc/
> 2012-08-03 Marc Glisse <marc.glisse@inria.fr>
>
>         PR tree-optimization/30318
>         * double-int.c (mul_double_wide_with_sign): New function.
>         (mul_double_with_sign): Call the new function.
>         * double-int.h (mul_double_wide_with_sign): Declare the new function.
>         * tree-vrp.c (extract_range_from_binary_expr_1) [MULT_EXPR]:
>         Handle integer types that wrap on overflow.
>         (quad_int_cmp): New helper function.
>         (quad_int_pair_sort): Likewise.
>
> gcc/testsuite/
> 2012-08-03 Marc Glisse <marc.glisse@inria.fr>
>
>         PR tree-optimization/30318
>         * gcc.dg/tree-ssa/vrp77.c: New testcase.
>
> --
> Marc Glisse
> Index: gcc/tree-vrp.c
> ===================================================================
> --- gcc/tree-vrp.c      (revision 190103)
> +++ gcc/tree-vrp.c      (working copy)
> @@ -2181,20 +2181,42 @@ extract_range_from_multiplicative_op_1 (
>      {
>        /* If the new range has its limits swapped around (MIN > MAX),
>          then the operation caused one of them to wrap around, mark
>          the new range VARYING.  */
>        set_value_range_to_varying (vr);
>      }
>    else
>      set_value_range (vr, type, min, max, NULL);
>  }
>
> +/* Some quadruple precision helpers.  */
> +static int
> +quad_int_cmp (double_int l0, double_int h0,
> +             double_int l1, double_int h1, bool uns)
> +{
> +  int c = double_int_cmp (h0, h1, uns);
> +  if (c != 0) return c;
> +  return double_int_ucmp (l0, l1);
> +}

I suppose that's appropriate for double-int.h as a static inline function.

Ok with or without moving it (we can do that as a follow-up anyway) if
testing is ok.
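
For reference, the move would amount to something like the following in
double-int.h (just a sketch reusing the body from the patch, untested):

  /* Compare the quadruple-precision value (H0,L0) with (H1,L1).
     UNS selects the signedness of the high-part comparison; the low
     parts always compare unsigned.  */
  static inline int
  quad_int_cmp (double_int l0, double_int h0,
                double_int l1, double_int h1, bool uns)
  {
    int c = double_int_cmp (h0, h1, uns);
    if (c != 0)
      return c;
    return double_int_ucmp (l0, l1);
  }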

Thanks,
Richard.

Patch

Index: gcc/tree-vrp.c
===================================================================
--- gcc/tree-vrp.c	(revision 190103)
+++ gcc/tree-vrp.c	(working copy)
@@ -2181,20 +2181,42 @@ extract_range_from_multiplicative_op_1 (
     {
       /* If the new range has its limits swapped around (MIN > MAX),
 	 then the operation caused one of them to wrap around, mark
 	 the new range VARYING.  */
       set_value_range_to_varying (vr);
     }
   else
     set_value_range (vr, type, min, max, NULL);
 }
 
+/* Some quadruple precision helpers.  */
+static int
+quad_int_cmp (double_int l0, double_int h0,
+	      double_int l1, double_int h1, bool uns)
+{
+  int c = double_int_cmp (h0, h1, uns);
+  if (c != 0) return c;
+  return double_int_ucmp (l0, l1);
+}
+
+static void
+quad_int_pair_sort (double_int *l0, double_int *h0,
+		    double_int *l1, double_int *h1, bool uns)
+{
+  if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
+    {
+      double_int tmp;
+      tmp = *l0; *l0 = *l1; *l1 = tmp;
+      tmp = *h0; *h0 = *h1; *h1 = tmp;
+    }
+}
+
 /* Extract range information from a binary operation CODE based on
    the ranges of each of its operands, *VR0 and *VR1 with resulting
    type EXPR_TYPE.  The resulting range is stored in *VR.  */
 
 static void
 extract_range_from_binary_expr_1 (value_range_t *vr,
 				  enum tree_code code, tree expr_type,
 				  value_range_t *vr0_, value_range_t *vr1_)
 {
   value_range_t vr0 = *vr0_, vr1 = *vr1_;
@@ -2562,20 +2584,137 @@ extract_range_from_binary_expr_1 (value_
 	{
 	  /* For operations that make the resulting range directly
 	     proportional to the original ranges, apply the operation to
 	     the same end of each range.  */
 	  min = vrp_int_const_binop (code, vr0.min, vr1.min);
 	  max = vrp_int_const_binop (code, vr0.max, vr1.max);
 	}
     }
   else if (code == MULT_EXPR)
     {
+      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
+	 drop to varying.  */
+      if (range_int_cst_p (&vr0)
+	  && range_int_cst_p (&vr1)
+	  && TYPE_OVERFLOW_WRAPS (expr_type))
+	{
+	  double_int min0, max0, min1, max1, sizem1, size;
+	  double_int prod0l, prod0h, prod1l, prod1h,
+		     prod2l, prod2h, prod3l, prod3h;
+	  bool uns0, uns1, uns;
+
+	  sizem1 = double_int_max_value (TYPE_PRECISION (expr_type), true);
+	  size = double_int_add (sizem1, double_int_one);
+
+	  min0 = tree_to_double_int (vr0.min);
+	  max0 = tree_to_double_int (vr0.max);
+	  min1 = tree_to_double_int (vr1.min);
+	  max1 = tree_to_double_int (vr1.max);
+
+	  uns0 = TYPE_UNSIGNED (expr_type);
+	  uns1 = uns0;
+
+	  /* Canonicalize the intervals.  */
+	  if (TYPE_UNSIGNED (expr_type))
+	    {
+	      double_int min2 = double_int_sub (size, min0);
+	      if (double_int_cmp (min2, max0, true) < 0)
+		{
+		  min0 = double_int_neg (min2);
+		  max0 = double_int_sub (max0, size);
+		  uns0 = false;
+		}
+
+	      min2 = double_int_sub (size, min1);
+	      if (double_int_cmp (min2, max1, true) < 0)
+		{
+		  min1 = double_int_neg (min2);
+		  max1 = double_int_sub (max1, size);
+		  uns1 = false;
+		}
+	    }
+	  uns = uns0 & uns1;
+
+	  mul_double_wide_with_sign (min0.low, min0.high,
+				     min1.low, min1.high,
+				     &prod0l.low, &prod0l.high,
+				     &prod0h.low, &prod0h.high, true);
+	  if (!uns0 && double_int_negative_p (min0))
+	    prod0h = double_int_sub (prod0h, min1);
+	  if (!uns1 && double_int_negative_p (min1))
+	    prod0h = double_int_sub (prod0h, min0);
+
+	  mul_double_wide_with_sign (min0.low, min0.high,
+				     max1.low, max1.high,
+				     &prod1l.low, &prod1l.high,
+				     &prod1h.low, &prod1h.high, true);
+	  if (!uns0 && double_int_negative_p (min0))
+	    prod1h = double_int_sub (prod1h, max1);
+	  if (!uns1 && double_int_negative_p (max1))
+	    prod1h = double_int_sub (prod1h, min0);
+
+	  mul_double_wide_with_sign (max0.low, max0.high,
+				     min1.low, min1.high,
+				     &prod2l.low, &prod2l.high,
+				     &prod2h.low, &prod2h.high, true);
+	  if (!uns0 && double_int_negative_p (max0))
+	    prod2h = double_int_sub (prod2h, min1);
+	  if (!uns1 && double_int_negative_p (min1))
+	    prod2h = double_int_sub (prod2h, max0);
+
+	  mul_double_wide_with_sign (max0.low, max0.high,
+				     max1.low, max1.high,
+				     &prod3l.low, &prod3l.high,
+				     &prod3h.low, &prod3h.high, true);
+	  if (!uns0 && double_int_negative_p (max0))
+	    prod3h = double_int_sub (prod3h, max1);
+	  if (!uns1 && double_int_negative_p (max1))
+	    prod3h = double_int_sub (prod3h, max0);
+
+	  /* Sort the 4 products.  */
+	  quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
+	  quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
+	  quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
+	  quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
+
+	  /* Max - min.  */
+	  if (double_int_zero_p (prod0l))
+	    {
+	      prod1l = double_int_zero;
+	      prod1h = double_int_neg (prod0h);
+	    }
+	  else
+	    {
+	      prod1l = double_int_neg (prod0l);
+	      prod1h = double_int_not (prod0h);
+	    }
+	  prod2l = double_int_add (prod3l, prod1l);
+	  prod2h = double_int_add (prod3h, prod1h);
+	  if (double_int_ucmp (prod2l, prod3l) < 0)
+	    prod2h = double_int_add (prod2h, double_int_one); /* carry */
+
+	  if (!double_int_zero_p (prod2h)
+	      || double_int_cmp (prod2l, sizem1, true) >= 0)
+	    {
+	      /* the range covers all values.  */
+	      set_value_range_to_varying (vr);
+	      return;
+	    }
+
+	  /* The following should handle the wrapping and selecting
+	     VR_ANTI_RANGE for us.  */
+	  min = double_int_to_tree (expr_type, prod0l);
+	  max = double_int_to_tree (expr_type, prod3l);
+	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
+	  return;
+	}
+
       /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
 	 drop to VR_VARYING.  It would take more effort to compute a
 	 precise range for such a case.  For example, if we have
 	 op0 == 65536 and op1 == 65536 with their ranges both being
 	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
 	 we cannot claim that the product is in ~[0,0].  Note that we
 	 are guaranteed to have vr0.type == vr1.type at this
 	 point.  */
       if (vr0.type == VR_ANTI_RANGE
 	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
Index: gcc/testsuite/gcc.dg/tree-ssa/vrp77.c
===================================================================
--- gcc/testsuite/gcc.dg/tree-ssa/vrp77.c	(revision 0)
+++ gcc/testsuite/gcc.dg/tree-ssa/vrp77.c	(revision 0)
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+#ifdef __SIZEOF_INT128__
+#define T __int128
+#else
+#define T long long
+#endif
+
+extern void impossible (void);
+
+void f(T x)
+{
+  unsigned T y;
+  unsigned T z;
+  if (x < -7)
+    return;
+  if (x > 2)
+    return;
+  y = x;
+  z = y * y;
+  if (z == 666)
+    impossible ();
+}
+
+void g(unsigned T x)
+{
+  unsigned T y;
+  unsigned T z;
+  unsigned T m = -1;
+  m = m / 2;
+  if (x < m-2)
+    return;
+  if (x > m-1)
+    return;
+  y = x;
+  z = y * y;
+  if (z == 7)
+    impossible ();
+}
+
+/* { dg-final { scan-tree-dump-not "impossible" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */

Property changes on: gcc/testsuite/gcc.dg/tree-ssa/vrp77.c
___________________________________________________________________
Added: svn:keywords
   + Author Date Id Revision URL
Added: svn:eol-style
   + native

Index: gcc/double-int.c
===================================================================
--- gcc/double-int.c	(revision 190103)
+++ gcc/double-int.c	(working copy)
@@ -128,27 +128,42 @@ neg_double (unsigned HOST_WIDE_INT l1, H
    Each argument is given as two `HOST_WIDE_INT' pieces.
    One argument is L1 and H1; the other, L2 and H2.
    The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
 
 int
 mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
 		      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
 		      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
 		      bool unsigned_p)
 {
+  unsigned HOST_WIDE_INT toplow;
+  HOST_WIDE_INT tophigh;
+
+  return mul_double_wide_with_sign (l1, h1, l2, h2,
+				    lv, hv, &toplow, &tophigh,
+				    unsigned_p);
+}
+
+int
+mul_double_wide_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
+			   unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
+			   unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
+			   unsigned HOST_WIDE_INT *lw, HOST_WIDE_INT *hw,
+			   bool unsigned_p)
+{
   HOST_WIDE_INT arg1[4];
   HOST_WIDE_INT arg2[4];
   HOST_WIDE_INT prod[4 * 2];
   unsigned HOST_WIDE_INT carry;
   int i, j, k;
-  unsigned HOST_WIDE_INT toplow, neglow;
-  HOST_WIDE_INT tophigh, neghigh;
+  unsigned HOST_WIDE_INT neglow;
+  HOST_WIDE_INT neghigh;
 
   encode (arg1, l1, h1);
   encode (arg2, l2, h2);
 
   memset (prod, 0, sizeof prod);
 
   for (i = 0; i < 4; i++)
     {
       carry = 0;
       for (j = 0; j < 4; j++)
@@ -158,39 +173,39 @@ mul_double_with_sign (unsigned HOST_WIDE
 	  carry += arg1[i] * arg2[j];
 	  /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
 	  carry += prod[k];
 	  prod[k] = LOWPART (carry);
 	  carry = HIGHPART (carry);
 	}
       prod[i + 4] = carry;
     }
 
   decode (prod, lv, hv);
-  decode (prod + 4, &toplow, &tophigh);
+  decode (prod + 4, lw, hw);
 
   /* Unsigned overflow is immediate.  */
   if (unsigned_p)
-    return (toplow | tophigh) != 0;
+    return (*lw | *hw) != 0;
 
   /* Check for signed overflow by calculating the signed representation of the
      top half of the result; it should agree with the low half's sign bit.  */
   if (h1 < 0)
     {
       neg_double (l2, h2, &neglow, &neghigh);
-      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
+      add_double (neglow, neghigh, *lw, *hw, lw, hw);
     }
   if (h2 < 0)
     {
       neg_double (l1, h1, &neglow, &neghigh);
-      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
+      add_double (neglow, neghigh, *lw, *hw, lw, hw);
     }
-  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
+  return (*hv < 0 ? ~(*lw & *hw) : *lw | *hw) != 0;
 }
 
 /* Shift the doubleword integer in L1, H1 right by COUNT places
    keeping only PREC bits of result.  ARITH nonzero specifies
    arithmetic shifting; otherwise use logical shift.
    Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
 
 static void
 rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
 	       unsigned HOST_WIDE_INT count, unsigned int prec,
Index: gcc/double-int.h
===================================================================
--- gcc/double-int.h	(revision 190103)
+++ gcc/double-int.h	(working copy)
@@ -300,20 +300,25 @@ extern int add_double_with_sign (unsigne
 				 unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
 				 bool);
 #define add_double(l1,h1,l2,h2,lv,hv) \
   add_double_with_sign (l1, h1, l2, h2, lv, hv, false)
 extern int neg_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
 		       unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
 extern int mul_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
 				 unsigned HOST_WIDE_INT, HOST_WIDE_INT,
 				 unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
 				 bool);
+extern int mul_double_wide_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
+				      unsigned HOST_WIDE_INT, HOST_WIDE_INT,
+				      unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
+				      unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
+				      bool);
 #define mul_double(l1,h1,l2,h2,lv,hv) \
   mul_double_with_sign (l1, h1, l2, h2, lv, hv, false)
 extern void lshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
 			   HOST_WIDE_INT, unsigned int,
 			   unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, bool);
 extern int div_and_round_double (unsigned, int, unsigned HOST_WIDE_INT,
 				 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
 				 HOST_WIDE_INT, unsigned HOST_WIDE_INT *,
 				 HOST_WIDE_INT *, unsigned HOST_WIDE_INT *,
 				 HOST_WIDE_INT *);
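
A note on the new entry point: mul_double_wide_with_sign is
mul_double_with_sign extended to also hand back the upper double_int of
the full product. An illustrative call, mirroring its use in tree-vrp.c
above (a sketch, not additional patch content; a and b stand for
arbitrary double_ints):

  /* Exact quadruple-width product of A and B: LO receives the low
     double_int, HI the high one.  With unsigned_p true, the return
     value is nonzero iff HI is nonzero, i.e. the product does not fit
     in a single double_int.  */
  double_int lo, hi;
  int ovf = mul_double_wide_with_sign (a.low, a.high, b.low, b.high,
				       &lo.low, &lo.high,
				       &hi.low, &hi.high,
				       /*unsigned_p=*/true);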