Patchwork [wide-int] Make shifted_mask more forgiving

login
register
mail settings
Submitter Richard Sandiford
Date Nov. 2, 2013, 11:52 a.m.
Message ID <87d2mjja1q.fsf@talisman.default>
Download mbox | patch
Permalink /patch/287976/
State New
Headers show

Comments

Richard Sandiford - Nov. 2, 2013, 11:52 a.m.
r201806 added some extra checks to the handling of CONCAT when expanding
assignments.  This was to fix an ICE on gcc.dg/pr48335-2.c for ppc.

I tripped over this because it also causes the assembly output for
the test to change.  I tried backing out the patch and the ICE was
from the assert in wide-int.cc:shifted_mask:

  gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);

However, in this case, the mask was being generated by mask_rtx,
which specifically allows inputs that are out of range of the mode:

/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

So I don't think these inputs should trigger an ICE, even if in this
particular case they're conceptually wrong for some reason.

Since wide-int.cc:shifted_mask already handles zero-width masks
and cases where the end bit is out of range, I think we should
handle the start being out of range too.

It might be that r201806 is a good thing anyway (no idea either way),
but I think it should go directly on mainline first if so.

Tested on powerpc64-linux-gnu and x86_64-linux-gnu.  It fixes the
pr48335-2.c assembly differences.  OK to install?

Thanks,
Richard
Kenneth Zadeck - Nov. 2, 2013, 2:02 p.m.
this is fine with me.

kenny

On 11/02/2013 07:52 AM, Richard Sandiford wrote:
> r201806 added some extra checks to the handling of CONCAT when expanding
> assignments.  This was to fix an ICE on gcc.dg/pr48335-2.c for ppc.
>
> I tripped over this because it also causes the assembly output for
> the test to change.  I tried backing out the patch and the ICE was
> from the assert in wide-int.cc:shifted_mask:
>
>    gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);
>
> However, in this case, the mask was being generated by mask_rtx,
> which specifically allows inputs that are out of range of the mode:
>
> /* Return a constant integer mask value of mode MODE with BITSIZE ones
>     followed by BITPOS zeros, or the complement of that if COMPLEMENT.
>     The mask is truncated if necessary to the width of mode MODE.  The
>     mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */
>
> So I don't think these inputs should trigger an ICE, even if in this
> particular case they're conceptually wrong for some reason.
>
> Since wide-int.cc:shifted_mask already handles zero-width masks
> and cases where the end bit is out of range, I think we should
> handle the start being out of range too.
>
> It might be that r201806 is a good thing anyway (no idea either way),
> but I think it should go directly on mainline first if so.
>
> Tested on powerpc64-linux-gnu and x86_64-linux-gnu.  It fixes the
> pr48335-2.c assembly differences.  OK to install?
>
> Thanks,
> Richard
>
>
> Index: gcc/expr.c
> ===================================================================
> --- gcc/expr.c	2013-11-02 11:15:32.739837369 +0000
> +++ gcc/expr.c	2013-11-02 11:15:42.562911693 +0000
> @@ -4765,14 +4765,12 @@ expand_assignment (tree to, tree from, b
>   		   && (bitpos == 0 || bitpos == mode_bitsize / 2))
>   	    result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
>   				 nontemporal);
> -	  else if (bitpos + bitsize <= mode_bitsize / 2
> -		   && bitpos+bitsize <= mode_bitsize)
> +	  else if (bitpos + bitsize <= mode_bitsize / 2)
>   	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
>   				  bitregion_start, bitregion_end,
>   				  mode1, from,
>   				  get_alias_set (to), nontemporal);
> -	  else if (bitpos >= mode_bitsize / 2
> -		   && bitpos+bitsize <= mode_bitsize)
> +	  else if (bitpos >= mode_bitsize / 2)
>   	    result = store_field (XEXP (to_rtx, 1), bitsize,
>   				  bitpos - mode_bitsize / 2,
>   				  bitregion_start, bitregion_end,
> @@ -4791,12 +4789,8 @@ expand_assignment (tree to, tree from, b
>   	    }
>   	  else
>   	    {
> -	      HOST_WIDE_INT extra = 0;
> -              if (bitpos+bitsize > mode_bitsize)
> -                extra = bitpos+bitsize - mode_bitsize;
>   	      rtx temp = assign_stack_temp (GET_MODE (to_rtx),
> -					    GET_MODE_SIZE (GET_MODE (to_rtx))
> -					    + extra);
> +					    GET_MODE_SIZE (GET_MODE (to_rtx)));
>   	      write_complex_part (temp, XEXP (to_rtx, 0), false);
>   	      write_complex_part (temp, XEXP (to_rtx, 1), true);
>   	      result = store_field (temp, bitsize, bitpos,
> Index: gcc/wide-int.cc
> ===================================================================
> --- gcc/wide-int.cc	2013-11-02 11:15:32.740837376 +0000
> +++ gcc/wide-int.cc	2013-11-02 11:15:42.562911693 +0000
> @@ -748,18 +748,16 @@ unsigned int
>   wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
>   		  bool negate, unsigned int prec)
>   {
> -  gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);
> -
> -  if (start + width > prec)
> -    width = prec - start;
> -  unsigned int end = start + width;
> -
> -  if (width == 0)
> +  if (start >= prec || width == 0)
>       {
>         val[0] = negate ? -1 : 0;
>         return 1;
>       }
>   
> +  if (width > prec - start)
> +    width = prec - start;
> +  unsigned int end = start + width;
> +
>     unsigned int i = 0;
>     while (i < start / HOST_BITS_PER_WIDE_INT)
>       val[i++] = negate ? -1 : 0;

Patch

Index: gcc/expr.c
===================================================================
--- gcc/expr.c	2013-11-02 11:15:32.739837369 +0000
+++ gcc/expr.c	2013-11-02 11:15:42.562911693 +0000
@@ -4765,14 +4765,12 @@  expand_assignment (tree to, tree from, b
 		   && (bitpos == 0 || bitpos == mode_bitsize / 2))
 	    result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
 				 nontemporal);
-	  else if (bitpos + bitsize <= mode_bitsize / 2
-		   && bitpos+bitsize <= mode_bitsize)
+	  else if (bitpos + bitsize <= mode_bitsize / 2)
 	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
 				  bitregion_start, bitregion_end,
 				  mode1, from,
 				  get_alias_set (to), nontemporal);
-	  else if (bitpos >= mode_bitsize / 2
-		   && bitpos+bitsize <= mode_bitsize)
+	  else if (bitpos >= mode_bitsize / 2)
 	    result = store_field (XEXP (to_rtx, 1), bitsize,
 				  bitpos - mode_bitsize / 2,
 				  bitregion_start, bitregion_end,
@@ -4791,12 +4789,8 @@  expand_assignment (tree to, tree from, b
 	    }
 	  else
 	    {
-	      HOST_WIDE_INT extra = 0;
-              if (bitpos+bitsize > mode_bitsize)
-                extra = bitpos+bitsize - mode_bitsize;
 	      rtx temp = assign_stack_temp (GET_MODE (to_rtx),
-					    GET_MODE_SIZE (GET_MODE (to_rtx))
-					    + extra);
+					    GET_MODE_SIZE (GET_MODE (to_rtx)));
 	      write_complex_part (temp, XEXP (to_rtx, 0), false);
 	      write_complex_part (temp, XEXP (to_rtx, 1), true);
 	      result = store_field (temp, bitsize, bitpos,
Index: gcc/wide-int.cc
===================================================================
--- gcc/wide-int.cc	2013-11-02 11:15:32.740837376 +0000
+++ gcc/wide-int.cc	2013-11-02 11:15:42.562911693 +0000
@@ -748,18 +748,16 @@  unsigned int
 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
 		  bool negate, unsigned int prec)
 {
-  gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);
-
-  if (start + width > prec)
-    width = prec - start;
-  unsigned int end = start + width;
-
-  if (width == 0)
+  if (start >= prec || width == 0)
     {
       val[0] = negate ? -1 : 0;
       return 1;
     }
 
+  if (width > prec - start)
+    width = prec - start;
+  unsigned int end = start + width;
+
   unsigned int i = 0;
   while (i < start / HOST_BITS_PER_WIDE_INT)
     val[i++] = negate ? -1 : 0;