[ARM] Remove an unused reload hook.

Message ID 54E5D4BA.9010505@arm.com
State New

Commit Message

Matthew Wahab Feb. 19, 2015, 12:19 p.m. UTC
The LEGITIMIZE_RELOAD_ADDRESS macro is only needed for reload. Since the 
ARM backend no longer supports reload, this macro is not needed and this 
patch removes it.

Tested arm-none-linux-gnueabihf with gcc-check.

Ok for trunk, now or in stage 1?
Matthew

gcc/
2015-02-19  Matthew Wahab  <matthew.wahab@arm.com>

	* config/arm/arm.h (LEGITIMIZE_RELOAD_ADDRESS): Remove.
	(ARM_LEGITIMIZE_RELOAD_ADDRESS): Remove.
	(THUMB_LEGITIMIZE_RELOAD_ADDRESS): Remove.
	* config/arm/arm.c (arm_legitimize_reload_address): Remove.
	(thumb_legitimize_reload_address): Remove.
	* config/arm/arm-protos.h (arm_legitimize_reload_address):
	Remove.
	(thumb_legitimize_reload_address): Remove.

Comments

Richard Earnshaw Feb. 27, 2015, 9:41 a.m. UTC | #1
On 19/02/15 12:19, Matthew Wahab wrote:
> The LEGITIMIZE_RELOAD_ADDRESS macro is only needed for reload. Since the
> ARM backend no longer supports reload, this macro is not needed and this
> patch removes it.
> 
> Tested arm-none-linux-gnueabihf with gcc-check.
> 
> Ok for trunk, now or in stage 1?
> Matthew
> 
> gcc/
> 2015-02-19  Matthew Wahab  <matthew.wahab@arm.com>
> 
>     * config/arm/arm.h (LEGITIMIZE_RELOAD_ADDRESS): Remove.
>     (ARM_LEGITIMIZE_RELOAD_ADDRESS): Remove.
>     (THUMB_LEGITIMIZE_RELOAD_ADDRESS): Remove.
>     * config/arm/arm.c (arm_legitimize_reload_address): Remove.
>     (thumb_legitimize_reload_address): Remove.
>     * config/arm/arm-protos.h (arm_legitimize_reload_address):
>     Remove.
>     (thumb_legitimize_reload_address): Remove.
> 

This is OK for stage 1.

I have one open question: can LRA generate the optimizations that these
hooks used to provide through reload?  If not, please could you file
some bugzilla reports so that we don't lose them.

Thanks,
R.

Matthew Wahab March 5, 2015, 4:34 p.m. UTC | #2
On 27/02/15 09:41, Richard Earnshaw wrote:
> On 19/02/15 12:19, Matthew Wahab wrote:
>> The LEGITIMIZE_RELOAD_ADDRESS macro is only needed for reload. Since the
>> ARM backend no longer supports reload, this macro is not needed and this
>> patch removes it.
>>
>> gcc/
>> 2015-02-19  Matthew Wahab  <matthew.wahab@arm.com>
>>
>>      * config/arm/arm.h (LEGITIMIZE_RELOAD_ADDRESS): Remove.
>>      (ARM_LEGITIMIZE_RELOAD_ADDRESS): Remove.
>>      (THUMB_LEGITIMIZE_RELOAD_ADDRESS): Remove.
>>      * config/arm/arm.c (arm_legitimize_reload_address): Remove.
>>      (thumb_legitimize_reload_address): Remove.
>>      * config/arm/arm-protos.h (arm_legitimize_reload_address):
>>      Remove.
>>      (thumb_legitimize_reload_address): Remove.
>>
>
> This is OK for stage 1.
>
> I have one open question: can LRA generate the optimizations that these
> hooks used to provide through reload?  If not, please could you file
> some bugzilla reports so that we don't lose them.
>
> Thanks,
> R.

arm_legitimize_reload_address was added by
https://gcc.gnu.org/ml/gcc-patches/2011-04/msg00605.html. Going by the
comment in config/arm/arm.c, the optimization turns
	add t1, r2, #4096
	ldr r0, [t1, #4]
	add t2, r2, #4096
	ldr r1, [t2, #8]
into
	add t1, r2, #4096
	ldr r0, [t1, #4]
	ldr r1, [t1, #8]
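
As an aside (the sketch below is mine, not part of the patch), the
splitting can be reproduced with the SIGN_MAG_LOW_ADDR_BITS macro from
the removed arm_legitimize_reload_address; which offset-field width
applies to each load is my assumption:
----
#include <stdio.h>

/* Copied from the removed arm_legitimize_reload_address.  */
#define SIGN_MAG_LOW_ADDR_BITS(VAL, N)                              \
  (((VAL) & ((1 << (N)) - 1))                                       \
   ? (((VAL) & ((1 << ((N) + 1)) - 1)) ^ (1 << (N))) - (1 << (N))   \
   : 0)

/* Split VAL into a high part for the reload add and a low part that
   fits an N-bit sign-magnitude offset field.  */
static void
split (long val, int n)
{
  long low = SIGN_MAG_LOW_ADDR_BITS (val, n);
  printf ("offset %ld, %d-bit field -> high %ld, low %ld\n",
          val, n, val - low, low);
}

int
main (void)
{
  split (4100, 10);      /* coprocessor 8-bit+'00' field: 4096 + 4  */
  split (0x3FFFFC, 12);  /* word ldr/str: 0x400000 - 4  */
  split (164000, 12);    /* buf[41000] in the test below: 163840 + 160  */
  return 0;
}
----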

As far as I can tell, LRA does do this. Compiling the following with -O1:
----
int bar(int, int, int);
int test1(int* buf)
{
   int a = buf[41000];
   int b = buf[41004];
   int c = buf[41008];
   bar(a, b, c);
   return a + b + c;
}
----
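
(With 4-byte ints, elements 41000, 41004 and 41008 sit at byte offsets
164000, 164016 and 164032, all beyond the 4095-byte immediate range of
a plain ldr, so each address has to be legitimized.)
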
gcc version 4.5.1 (Sourcery G++ Lite 2010.09-51), which predates the 
optimization, produces
	ldr	r3, .L2
	ldr	r4, [r0, r3]
	add	r3, r3, #16
	ldr	r5, [r0, r3]
	add	r3, r3, #16
	ldr	r6, [r0, r3]

gcc version 4.9.3 20141119, with and without -mno-lra, produces
	add	r0, r0, #163840
	ldr	r4, [r0, #160]
	ldr	r6, [r0, #176]
	ldr	r5, [r0, #192]
so it looks like the better sequence gets generated.
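(For the record: 163840 + 160 = 164000 = 41000 * 4, the same high/low
split the sketch above computes for a 12-bit offset field, and the
single add is shared by all three loads.)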

thumb_legitimize_reload_address was added by 
https://gcc.gnu.org/ml/gcc-patches/2005-08/msg01140.html to fix PR 
23436. It replaces sequences like
	mov	r3, r9
	mov	r2, r10
	ldr	r0, [r3, r2]
with
	mov	r3, r9
	add	r3, r3, r10
	ldr	r0, [r3]
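(In the first sequence r9 and r10 are both high registers, which
Thumb-1 loads cannot use as a base or index, so reloading the whole sum
needs only one low scratch register instead of two; that is the
rationale in the comment on the removed hook.)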

This looks like it's missing from trunk, so I'll open a bugzilla report
for it.

It's quite possible that I've got this all wrong, so if I've missed
something, or if you'd like me to open a bugzilla report for the ARM
optimization as well, let me know.

Matthew
Matthew Wahab March 5, 2015, 5:50 p.m. UTC | #3
On 05/03/15 16:34, Matthew Wahab wrote:

> thumb_legitimize_reload_address was added by
> https://gcc.gnu.org/ml/gcc-patches/2005-08/msg01140.html to fix PR
> 23436. It replaces sequences like
> 	mov	r3, r9
> 	mov	r2, r10
> 	ldr	r0, [r3, r2]
> with
> 	mov	r3, r9
> 	add	r3, r3, r10
> 	ldr	r0, [r3]
>
> This looks like it's missing from trunk so I'll open a bugzilla report
> for it.

PR 65326 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65326).
Matthew
Matthew Wahab April 23, 2015, 11:05 a.m. UTC | #4
On 27/02/15 09:41, Richard Earnshaw wrote:
> On 19/02/15 12:19, Matthew Wahab wrote:
>> The LEGITIMIZE_RELOAD_ADDRESS macro is only needed for reload. Since the
>> ARM backend no longer supports reload, this macro is not needed and this
>> patch removes it.
>>
>
> This is OK for stage 1.

Committed as r222359.
Matthew

2015-04-23  Matthew Wahab  <matthew.wahab@arm.com>

	* config/arm/arm.h (LEGITIMIZE_RELOAD_ADDRESS): Remove.
	(ARM_LEGITIMIZE_RELOAD_ADDRESS): Remove.
	(THUMB_LEGITIMIZE_RELOAD_ADDRESS): Remove.
	* config/arm/arm.c (arm_legitimize_reload_address): Remove.
	(thumb_legitimize_reload_address): Remove.
	* config/arm/arm-protos.h (arm_legitimize_reload_address):
	Remove.
	(thumb_legitimize_reload_address): Remove.

Patch

diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 307babb..0595cc2 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -66,10 +66,6 @@ extern rtx legitimize_tls_address (rtx, rtx);
 extern bool arm_legitimate_address_p (machine_mode, rtx, bool);
 extern int arm_legitimate_address_outer_p (machine_mode, rtx, RTX_CODE, int);
 extern int thumb_legitimate_offset_p (machine_mode, HOST_WIDE_INT);
-extern bool arm_legitimize_reload_address (rtx *, machine_mode, int, int,
-					   int);
-extern rtx thumb_legitimize_reload_address (rtx *, machine_mode, int, int,
-					    int);
 extern int thumb1_legitimate_address_p (machine_mode, rtx, int);
 extern bool ldm_stm_operation_p (rtx, bool, machine_mode mode,
                                  bool, bool);
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 7bf5b4d..6efe664 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -7932,236 +7932,6 @@ thumb_legitimize_address (rtx x, rtx orig_x, machine_mode mode)
   return x;
 }
 
-bool
-arm_legitimize_reload_address (rtx *p,
-			       machine_mode mode,
-			       int opnum, int type,
-			       int ind_levels ATTRIBUTE_UNUSED)
-{
-  /* We must recognize output that we have already generated ourselves.  */
-  if (GET_CODE (*p) == PLUS
-      && GET_CODE (XEXP (*p, 0)) == PLUS
-      && REG_P (XEXP (XEXP (*p, 0), 0))
-      && CONST_INT_P (XEXP (XEXP (*p, 0), 1))
-      && CONST_INT_P (XEXP (*p, 1)))
-    {
-      push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
-		   MODE_BASE_REG_CLASS (mode), GET_MODE (*p),
-		   VOIDmode, 0, 0, opnum, (enum reload_type) type);
-      return true;
-    }
-
-  if (GET_CODE (*p) == PLUS
-      && REG_P (XEXP (*p, 0))
-      && ARM_REGNO_OK_FOR_BASE_P (REGNO (XEXP (*p, 0)))
-      /* If the base register is equivalent to a constant, let the generic
-	 code handle it.  Otherwise we will run into problems if a future
-	 reload pass decides to rematerialize the constant.  */
-      && !reg_equiv_constant (ORIGINAL_REGNO (XEXP (*p, 0)))
-      && CONST_INT_P (XEXP (*p, 1)))
-    {
-      HOST_WIDE_INT val = INTVAL (XEXP (*p, 1));
-      HOST_WIDE_INT low, high;
-
-      /* Detect coprocessor load/stores.  */
-      bool coproc_p = ((TARGET_HARD_FLOAT
-			&& TARGET_VFP
-			&& (mode == SFmode || mode == DFmode))
-		       || (TARGET_REALLY_IWMMXT
-			   && VALID_IWMMXT_REG_MODE (mode))
-		       || (TARGET_NEON
-			   && (VALID_NEON_DREG_MODE (mode)
-			       || VALID_NEON_QREG_MODE (mode))));
-
-      /* For some conditions, bail out when lower two bits are unaligned.  */
-      if ((val & 0x3) != 0
-	  /* Coprocessor load/store indexes are 8-bits + '00' appended.  */
-	  && (coproc_p
-	      /* For DI, and DF under soft-float: */
-	      || ((mode == DImode || mode == DFmode)
-		  /* Without ldrd, we use stm/ldm, which does not
-		     fair well with unaligned bits.  */
-		  && (! TARGET_LDRD
-		      /* Thumb-2 ldrd/strd is [-1020,+1020] in steps of 4.  */
-		      || TARGET_THUMB2))))
-	return false;
-
-      /* When breaking down a [reg+index] reload address into [(reg+high)+low],
-	 of which the (reg+high) gets turned into a reload add insn,
-	 we try to decompose the index into high/low values that can often
-	 also lead to better reload CSE.
-	 For example:
-	         ldr r0, [r2, #4100]  // Offset too large
-		 ldr r1, [r2, #4104]  // Offset too large
-
-	 is best reloaded as:
-	         add t1, r2, #4096
-		 ldr r0, [t1, #4]
-		 add t2, r2, #4096
-		 ldr r1, [t2, #8]
-
-	 which post-reload CSE can simplify in most cases to eliminate the
-	 second add instruction:
-	         add t1, r2, #4096
-		 ldr r0, [t1, #4]
-		 ldr r1, [t1, #8]
-
-	 The idea here is that we want to split out the bits of the constant
-	 as a mask, rather than as subtracting the maximum offset that the
-	 respective type of load/store used can handle.
-
-	 When encountering negative offsets, we can still utilize it even if
-	 the overall offset is positive; sometimes this may lead to an immediate
-	 that can be constructed with fewer instructions.
-	 For example:
-	         ldr r0, [r2, #0x3FFFFC]
-
-	 This is best reloaded as:
-	         add t1, r2, #0x400000
-		 ldr r0, [t1, #-4]
-
-	 The trick for spotting this for a load insn with N bits of offset
-	 (i.e. bits N-1:0) is to look at bit N; if it is set, then chose a
-	 negative offset that is going to make bit N and all the bits below
-	 it become zero in the remainder part.
-
-	 The SIGN_MAG_LOW_ADDR_BITS macro below implements this, with respect
-	 to sign-magnitude addressing (i.e. separate +- bit, or 1's complement),
-	 used in most cases of ARM load/store instructions.  */
-
-#define SIGN_MAG_LOW_ADDR_BITS(VAL, N)					\
-      (((VAL) & ((1 << (N)) - 1))					\
-       ? (((VAL) & ((1 << ((N) + 1)) - 1)) ^ (1 << (N))) - (1 << (N))	\
-       : 0)
-
-      if (coproc_p)
-	{
-	  low = SIGN_MAG_LOW_ADDR_BITS (val, 10);
-
-	  /* NEON quad-word load/stores are made of two double-word accesses,
-	     so the valid index range is reduced by 8. Treat as 9-bit range if
-	     we go over it.  */
-	  if (TARGET_NEON && VALID_NEON_QREG_MODE (mode) && low >= 1016)
-	    low = SIGN_MAG_LOW_ADDR_BITS (val, 9);
-	}
-      else if (GET_MODE_SIZE (mode) == 8)
-	{
-	  if (TARGET_LDRD)
-	    low = (TARGET_THUMB2
-		   ? SIGN_MAG_LOW_ADDR_BITS (val, 10)
-		   : SIGN_MAG_LOW_ADDR_BITS (val, 8));
-	  else
-	    /* For pre-ARMv5TE (without ldrd), we use ldm/stm(db/da/ib)
-	       to access doublewords. The supported load/store offsets are
-	       -8, -4, and 4, which we try to produce here.  */
-	    low = ((val & 0xf) ^ 0x8) - 0x8;
-	}
-      else if (GET_MODE_SIZE (mode) < 8)
-	{
-	  /* NEON element load/stores do not have an offset.  */
-	  if (TARGET_NEON_FP16 && mode == HFmode)
-	    return false;
-
-	  if (TARGET_THUMB2)
-	    {
-	      /* Thumb-2 has an asymmetrical index range of (-256,4096).
-		 Try the wider 12-bit range first, and re-try if the result
-		 is out of range.  */
-	      low = SIGN_MAG_LOW_ADDR_BITS (val, 12);
-	      if (low < -255)
-		low = SIGN_MAG_LOW_ADDR_BITS (val, 8);
-	    }
-	  else
-	    {
-	      if (mode == HImode || mode == HFmode)
-		{
-		  if (arm_arch4)
-		    low = SIGN_MAG_LOW_ADDR_BITS (val, 8);
-		  else
-		    {
-		      /* The storehi/movhi_bytes fallbacks can use only
-			 [-4094,+4094] of the full ldrb/strb index range.  */
-		      low = SIGN_MAG_LOW_ADDR_BITS (val, 12);
-		      if (low == 4095 || low == -4095)
-			return false;
-		    }
-		}
-	      else
-		low = SIGN_MAG_LOW_ADDR_BITS (val, 12);
-	    }
-	}
-      else
-	return false;
-
-      high = ((((val - low) & (unsigned HOST_WIDE_INT) 0xffffffff)
-	       ^ (unsigned HOST_WIDE_INT) 0x80000000)
-	      - (unsigned HOST_WIDE_INT) 0x80000000);
-      /* Check for overflow or zero */
-      if (low == 0 || high == 0 || (high + low != val))
-	return false;
-
-      /* Reload the high part into a base reg; leave the low part
-	 in the mem.
-	 Note that replacing this gen_rtx_PLUS with plus_constant is
-	 wrong in this case because we rely on the
-	 (plus (plus reg c1) c2) structure being preserved so that
-	 XEXP (*p, 0) in push_reload below uses the correct term.  */
-      *p = gen_rtx_PLUS (GET_MODE (*p),
-			 gen_rtx_PLUS (GET_MODE (*p), XEXP (*p, 0),
-				       GEN_INT (high)),
-			 GEN_INT (low));
-      push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
-		   MODE_BASE_REG_CLASS (mode), GET_MODE (*p),
-		   VOIDmode, 0, 0, opnum, (enum reload_type) type);
-      return true;
-    }
-
-  return false;
-}
-
-rtx
-thumb_legitimize_reload_address (rtx *x_p,
-				 machine_mode mode,
-				 int opnum, int type,
-				 int ind_levels ATTRIBUTE_UNUSED)
-{
-  rtx x = *x_p;
-
-  if (GET_CODE (x) == PLUS
-      && GET_MODE_SIZE (mode) < 4
-      && REG_P (XEXP (x, 0))
-      && XEXP (x, 0) == stack_pointer_rtx
-      && CONST_INT_P (XEXP (x, 1))
-      && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
-    {
-      rtx orig_x = x;
-
-      x = copy_rtx (x);
-      push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
-		   Pmode, VOIDmode, 0, 0, opnum, (enum reload_type) type);
-      return x;
-    }
-
-  /* If both registers are hi-regs, then it's better to reload the
-     entire expression rather than each register individually.  That
-     only requires one reload register rather than two.  */
-  if (GET_CODE (x) == PLUS
-      && REG_P (XEXP (x, 0))
-      && REG_P (XEXP (x, 1))
-      && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
-      && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
-    {
-      rtx orig_x = x;
-
-      x = copy_rtx (x);
-      push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
-		   Pmode, VOIDmode, 0, 0, opnum, (enum reload_type) type);
-      return x;
-    }
-
-  return NULL;
-}
-
 /* Return TRUE if X contains any TLS symbol references.  */
 
 bool
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 297dfe1..fc34782 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1354,46 +1354,6 @@ enum reg_class
      ? GENERAL_REGS : NO_REGS)					\
     : THUMB_SECONDARY_INPUT_RELOAD_CLASS (CLASS, MODE, X)))
 
-/* Try a machine-dependent way of reloading an illegitimate address
-   operand.  If we find one, push the reload and jump to WIN.  This
-   macro is used in only one place: `find_reloads_address' in reload.c.
-
-   For the ARM, we wish to handle large displacements off a base
-   register by splitting the addend across a MOV and the mem insn.
-   This can cut the number of reloads needed.  */
-#define ARM_LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND, WIN)	   \
-  do									   \
-    {									   \
-      if (arm_legitimize_reload_address (&X, MODE, OPNUM, TYPE, IND))	   \
-	goto WIN;							   \
-    }									   \
-  while (0)
-
-/* XXX If an HImode FP+large_offset address is converted to an HImode
-   SP+large_offset address, then reload won't know how to fix it.  It sees
-   only that SP isn't valid for HImode, and so reloads the SP into an index
-   register, but the resulting address is still invalid because the offset
-   is too big.  We fix it here instead by reloading the entire address.  */
-/* We could probably achieve better results by defining PROMOTE_MODE to help
-   cope with the variances between the Thumb's signed and unsigned byte and
-   halfword load instructions.  */
-/* ??? This should be safe for thumb2, but we may be able to do better.  */
-#define THUMB_LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN)     \
-do {									      \
-  rtx new_x = thumb_legitimize_reload_address (&X, MODE, OPNUM, TYPE, IND_L); \
-  if (new_x)								      \
-    {									      \
-      X = new_x;							      \
-      goto WIN;								      \
-    }									      \
-} while (0)
-
-#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_LEVELS, WIN)   \
-  if (TARGET_ARM)							   \
-    ARM_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN); \
-  else									   \
-    THUMB_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN)
-
 /* Return the maximum number of consecutive registers
    needed to represent mode MODE in a register of class CLASS.
    ARM regs are UNITS_PER_WORD bits.