[committed] Stack clash protection patch 02/08 - V4

Message ID 779b8875-75cc-a476-5c3d-a6c7e0feb036@redhat.com
State New

Commit Message

Jeff Law Sept. 20, 2017, 5:05 a.m. UTC
The main purpose of this patch is to protect dynamic stack allocations
from stack-clash attacks.  I think the only notable change since V3 was
to use a target hook instead of a target macro to control a bit of
special behavior we're going to want on aarch64.
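
As a reference for backend work, a target that needs the new behavior
would override the hook in its target .c file.  The sketch below is
illustrative only -- the "example_" names are invented and not part of
this series:

  /* Hypothetical backend sketch, not from this patch: a target whose
     prologue leaves dynamic allocations unprobed would always request
     the final probe.  */
  static bool
  example_stack_clash_protection_final_dynamic_probe (rtx residual ATTRIBUTE_UNUSED)
  {
    return true;
  }

  #undef TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE
  #define TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE \
    example_stack_clash_protection_final_dynamic_probe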

Bootstrapped and regression tested on x86.  Installing on the trunk.

Jeff
commit feb64697b00b4a52346eaa3b7f349a0636936f7c
Author: Jeff Law <law@torsion.usersys.redhat.com>
Date:   Wed Jun 28 14:02:16 2017 -0400

    2017-09-18  Jeff Law  <law@redhat.com>
    
            * explow.c: Include "params.h".
            (anti_adjust_stack_and_probe_stack_clash): New function.
            (get_stack_check_protect): Likewise.
            (compute_stack_clash_protection_loop_data): Likewise.
            (emit_stack_clash_protection_probe_loop_start): Likewise.
            (emit_stack_clash_protection_probe_loop_end): Likewise.
            (allocate_dynamic_stack_space): Use get_stack_check_protect.
            Use anti_adjust_stack_and_probe_stack_clash.
            * explow.h (compute_stack_clash_protection_loop_data): Prototype.
            (emit_stack_clash_protection_probe_loop_start): Likewise.
            (emit_stack_clash_protection_probe_loop_end): Likewise.
            * rtl.h (get_stack_check_protect): Prototype.
            * target.def (stack_clash_protection_final_dynamic_probe): New hook.
            * targhooks.c (default_stack_clash_protection_final_dynamic_probe): New.
            * targhooks.h (default_stack_clash_protection_final_dynamic_probe):
            Prototype.
            * doc/tm.texi.in (TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE):
            Add @hook.
            * doc/tm.texi: Rebuilt.
            * config/aarch64/aarch64.c (aarch64_expand_prologue): Use
            get_stack_check_protect.
            * config/alpha/alpha.c (alpha_expand_prologue): Likewise.
            * config/arm/arm.c (arm_expand_prologue): Likewise.
            (arm_frame_pointer_required): Likewise.
            * config/i386/i386.c (ix86_expand_prologue): Likewise.
            * config/ia64/ia64.c (ia64_expand_prologue): Likewise.
            * config/mips/mips.c (mips_expand_prologue): Likewise.
            * config/powerpcspe/powerpcspe.c (rs6000_emit_prologue): Likewise.
            * config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise.
            * config/sparc/sparc.c (sparc_expand_prologue): Likewise.
            (sparc_flat_expand_prologue): Likewise.
    
            * gcc.dg/stack-check-3.c: New test.
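
To make the shape of the emitted code easier to see, here is a rough C
model of the probing sequence anti_adjust_stack_and_probe_stack_clash
arranges for a dynamic allocation.  This is a sketch, not compiler
code: the real work happens in RTL, the names below are invented, and a
downward-growing stack with a 4096-byte probe interval (i.e. 2 raised
to stack-clash-protection-probe-interval) is assumed.

  #include <stdint.h>

  #define PROBE_INTERVAL 4096

  /* SP models the stack pointer; PROBE stands in for a one-word
     store at the given address.  */
  static void
  model_dynamic_alloc (uintptr_t *sp, uintptr_t size,
                       void (*probe) (uintptr_t))
  {
    /* Round SIZE down to a multiple of the probe interval.  */
    uintptr_t rounded = size & -(uintptr_t) PROBE_INTERVAL;
    uintptr_t last = *sp - rounded;

    /* Allocate and probe one interval at a time.  Every allocation
       is probed, so the stack pointer can never step over the guard
       page unnoticed.  */
    while (*sp != last)
      {
        *sp -= PROBE_INTERVAL;
        probe (*sp + PROBE_INTERVAL - sizeof (uintptr_t));
      }

    /* Allocate and probe any residual.  */
    uintptr_t residual = size - rounded;
    if (residual != 0)
      {
        *sp -= residual;
        probe (*sp + residual - sizeof (uintptr_t));
      }
  }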

Patch

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index a1aa3f6327d..b8012611405 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,37 @@ 
 2017-09-19  Jeff Law  <law@redhat.com>
 
+	* explow.c: Include "params.h".
+	(anti_adjust_stack_and_probe_stack_clash): New function.
+	(get_stack_check_protect): Likewise.
+	(compute_stack_clash_protection_loop_data): Likewise.
+	(emit_stack_clash_protection_probe_loop_start): Likewise.
+	(emit_stack_clash_protection_probe_loop_end): Likewise.
+	(allocate_dynamic_stack_space): Use get_stack_check_protect.
+	Use anti_adjust_stack_and_probe_stack_clash.
+	* explow.h (compute_stack_clash_protection_loop_data): Prototype.
+	(emit_stack_clash_protection_probe_loop_start): Likewise.
+	(emit_stack_clash_protection_probe_loop_end): Likewise.
+	* rtl.h (get_stack_check_protect): Prototype.
+	* target.def (stack_clash_protection_final_dynamic_probe): New hook.
+	* targhooks.c (default_stack_clash_protection_final_dynamic_probe): New.
+	* targhooks.h (default_stack_clash_protection_final_dynamic_probe): 
+	Prototype.
+	* doc/tm.texi.in (TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE):
+	Add @hook.
+	* doc/tm.texi: Rebuilt.
+	* config/aarch64/aarch64.c (aarch64_expand_prologue): Use
+	get_stack_check_protect.
+	* config/alpha/alpha.c (alpha_expand_prologue): Likewise.
+	* config/arm/arm.c (arm_expand_prologue): Likewise.
+	(arm_frame_pointer_required): Likewise.
+	* config/i386/i386.c (ix86_expand_prologue): Likewise.
+	* config/ia64/ia64.c (ia64_expand_prologue): Likewise.
+	* config/mips/mips.c (mips_expand_prologue): Likewise.
+	* config/powerpcspe/powerpcspe.c (rs6000_emit_prologue): Likewise.
+	* config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise.
+	* config/sparc/sparc.c (sparc_expand_prologue): Likewise.
+	(sparc_flat_expand_prologue): Likewise.
+
 	* common.opt (-fstack-clash-protection): New option.
 	* flag-types.h (enum stack_check_type): Note difference between
 	-fstack-check= and -fstack-clash-protection.
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 1c1400871d4..5e26cb70da0 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -3666,12 +3666,14 @@  aarch64_expand_prologue (void)
     {
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (frame_size > PROBE_INTERVAL && frame_size > STACK_CHECK_PROTECT)
-	    aarch64_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					    frame_size - STACK_CHECK_PROTECT);
+	  if (frame_size > PROBE_INTERVAL
+	      && frame_size > get_stack_check_protect ())
+	    aarch64_emit_probe_stack_range (get_stack_check_protect (),
+					    (frame_size
+					     - get_stack_check_protect ()));
 	}
       else if (frame_size > 0)
-	aarch64_emit_probe_stack_range (STACK_CHECK_PROTECT, frame_size);
+	aarch64_emit_probe_stack_range (get_stack_check_protect (), frame_size);
     }
 
   aarch64_sub_sp (IP0_REGNUM, initial_adjust, true);
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index e0f458c9109..a4e8b2b6c30 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -7761,7 +7761,7 @@  alpha_expand_prologue (void)
 
   probed_size = frame_size;
   if (flag_stack_check)
-    probed_size += STACK_CHECK_PROTECT;
+    probed_size += get_stack_check_protect ();
 
   if (probed_size <= 32768)
     {
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index bc802adc3e3..679e838b0aa 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -21693,13 +21693,13 @@  arm_expand_prologue (void)
 
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    arm_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					size - STACK_CHECK_PROTECT,
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    arm_emit_probe_stack_range (get_stack_check_protect (),
+					size - get_stack_check_protect (),
 					regno, live_regs_mask);
 	}
       else if (size > 0)
-	arm_emit_probe_stack_range (STACK_CHECK_PROTECT, size,
+	arm_emit_probe_stack_range (get_stack_check_protect (), size,
 				    regno, live_regs_mask);
     }
 
@@ -27886,7 +27886,7 @@  arm_frame_pointer_required (void)
 	{
 	  /* We don't have the final size of the frame so adjust.  */
 	  size += 32 * UNITS_PER_WORD;
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
 	    return true;
 	}
       else
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 1c765fbc74c..05b0520759c 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -14871,7 +14871,7 @@  ix86_expand_prologue (void)
 	  HOST_WIDE_INT size = allocate;
 
 	  if (TARGET_64BIT && size >= HOST_WIDE_INT_C (0x80000000))
-	    size = 0x80000000 - STACK_CHECK_PROTECT - 1;
+	    size = 0x80000000 - get_stack_check_protect () - 1;
 
 	  if (TARGET_STACK_PROBE)
 	    {
@@ -14881,18 +14881,20 @@  ix86_expand_prologue (void)
 		    ix86_emit_probe_stack_range (0, size);
 		}
 	      else
-		ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
+		ix86_emit_probe_stack_range (0,
+					     size + get_stack_check_protect ());
 	    }
 	  else
 	    {
 	      if (crtl->is_leaf && !cfun->calls_alloca)
 		{
-		  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-		    ix86_emit_probe_stack_range (STACK_CHECK_PROTECT,
-						 size - STACK_CHECK_PROTECT);
+		  if (size > PROBE_INTERVAL
+		      && size > get_stack_check_protect ())
+		    ix86_emit_probe_stack_range (get_stack_check_protect (),
+						 size - get_stack_check_protect ());
 		}
 	      else
-		ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+		ix86_emit_probe_stack_range (get_stack_check_protect (), size);
 	    }
 	}
     }
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index b4d63592ea0..00ef2159323 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -3502,15 +3502,16 @@  ia64_expand_prologue (void)
 
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					 size - STACK_CHECK_PROTECT,
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    ia64_emit_probe_stack_range (get_stack_check_protect (),
+					 size - get_stack_check_protect (),
 					 bs_size);
-	  else if (size + bs_size > STACK_CHECK_PROTECT)
-	    ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
+	  else if (size + bs_size > get_stack_check_protect ())
+	    ia64_emit_probe_stack_range (get_stack_check_protect (),
+					 0, bs_size);
 	}
       else if (size + bs_size > 0)
-	ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
+	ia64_emit_probe_stack_range (get_stack_check_protect (), size, bs_size);
     }
 
   if (dump_file) 
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 7eaff14d472..67cee0bb051 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -12084,12 +12084,12 @@  mips_expand_prologue (void)
     {
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    mips_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					 size - STACK_CHECK_PROTECT);
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    mips_emit_probe_stack_range (get_stack_check_protect (),
+					 size - get_stack_check_protect ());
 	}
       else if (size > 0)
-	mips_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+	mips_emit_probe_stack_range (get_stack_check_protect (), size);
     }
 
   /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
diff --git a/gcc/config/powerpcspe/powerpcspe.c b/gcc/config/powerpcspe/powerpcspe.c
index 11664eed4e0..a956729f706 100644
--- a/gcc/config/powerpcspe/powerpcspe.c
+++ b/gcc/config/powerpcspe/powerpcspe.c
@@ -29693,12 +29693,12 @@  rs6000_emit_prologue (void)
 
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					   size - STACK_CHECK_PROTECT);
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    rs6000_emit_probe_stack_range (get_stack_check_protect (),
+					   size - get_stack_check_protect ());
 	}
       else if (size > 0)
-	rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+	rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
     }
 
   if (TARGET_FIX_AND_CONTINUE)
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 1978634f02e..e5ef63889b7 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -26765,12 +26765,12 @@  rs6000_emit_prologue (void)
 
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					   size - STACK_CHECK_PROTECT);
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    rs6000_emit_probe_stack_range (get_stack_check_protect (),
+					   size - get_stack_check_protect ());
 	}
       else if (size > 0)
-	rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+	rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
     }
 
   if (TARGET_FIX_AND_CONTINUE)
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index 749a7f866cb..906bd75b560 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -5738,12 +5738,12 @@  sparc_expand_prologue (void)
     {
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					  size - STACK_CHECK_PROTECT);
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    sparc_emit_probe_stack_range (get_stack_check_protect (),
+					  size - get_stack_check_protect ());
 	}
       else if (size > 0)
-	sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+	sparc_emit_probe_stack_range (get_stack_check_protect (), size);
     }
 
   if (size == 0)
@@ -5849,12 +5849,12 @@  sparc_flat_expand_prologue (void)
     {
       if (crtl->is_leaf && !cfun->calls_alloca)
 	{
-	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
-	    sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
-					  size - STACK_CHECK_PROTECT);
+	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+	    sparc_emit_probe_stack_range (get_stack_check_protect (),
+					  size - get_stack_check_protect ());
 	}
       else if (size > 0)
-	sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+	sparc_emit_probe_stack_range (get_stack_check_protect (), size);
     }
 
   if (sparc_save_local_in_regs_p)
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index ae65e4f5edf..07ae66a2e61 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -3411,6 +3411,10 @@  GCC computed the default from the values of the above macros and you will
 normally not need to override that default.
 @end defmac
 
+@deftypefn {Target Hook} bool TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE (rtx @var{residual})
+Some targets make optimistic assumptions about the state of stack probing when they emit their prologues.  On such targets a probe into the end of any dynamically allocated space is likely required for safety against stack clash style attacks.  Define this hook to return true if such a probe is required, or false otherwise.  You need not define this hook if it would always return false.
+@end deftypefn
+
 @need 2000
 @node Frame Registers
 @subsection Registers That Address the Stack Frame
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 733466dbb84..6a794371342 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -2847,6 +2847,8 @@  GCC computed the default from the values of the above macros and you will
 normally not need to override that default.
 @end defmac
 
+@hook TARGET_STACK_CLASH_PROTECTION_FINAL_DYNAMIC_PROBE
+
 @need 2000
 @node Frame Registers
 @subsection Registers That Address the Stack Frame
diff --git a/gcc/explow.c b/gcc/explow.c
index 638dc5f8f0f..0f30507d1f5 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -40,8 +40,10 @@  along with GCC; see the file COPYING3.  If not see
 #include "expr.h"
 #include "common/common-target.h"
 #include "output.h"
+#include "params.h"
 
 static rtx break_out_memory_refs (rtx);
+static void anti_adjust_stack_and_probe_stack_clash (rtx);
 
 
 /* Truncate and perhaps sign-extend C as appropriate for MODE.  */
@@ -1283,6 +1285,29 @@  get_dynamic_stack_size (rtx *psize, unsigned size_align,
   *psize = size;
 }
 
+/* Return the number of bytes to "protect" on the stack for -fstack-check.
+
+   "protect" in the context of -fstack-check means how many bytes we
+   should always ensure are available on the stack.  More importantly
+   this is how many bytes are skipped when probing the stack.
+
+   On some targets we want to reuse the -fstack-check prologue support
+   to give a degree of protection against stack clashing style attacks.
+
+   In that scenario we do not want to skip bytes before probing as that
+   would render the stack clash protections useless.
+
+   So we never use STACK_CHECK_PROTECT directly.  Instead we indirect through
+   this helper which allows us to provide different values for
+   -fstack-check and -fstack-clash-protection.  */
+HOST_WIDE_INT
+get_stack_check_protect (void)
+{
+  if (flag_stack_clash_protection)
+    return 0;
+  return STACK_CHECK_PROTECT;
+}
+
 /* Return an rtx representing the address of an area of memory dynamically
    pushed on the stack.
 
@@ -1441,7 +1466,7 @@  allocate_dynamic_stack_space (rtx size, unsigned size_align,
     probe_stack_range (STACK_OLD_CHECK_PROTECT + STACK_CHECK_MAX_FRAME_SIZE,
 		       size);
   else if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
-    probe_stack_range (STACK_CHECK_PROTECT, size);
+    probe_stack_range (get_stack_check_protect (), size);
 
   /* Don't let anti_adjust_stack emit notes.  */
   suppress_reg_args_size = true;
@@ -1494,6 +1519,8 @@  allocate_dynamic_stack_space (rtx size, unsigned size_align,
 
       if (flag_stack_check && STACK_CHECK_MOVING_SP)
 	anti_adjust_stack_and_probe (size, false);
+      else if (flag_stack_clash_protection)
+	anti_adjust_stack_and_probe_stack_clash (size);
       else
 	anti_adjust_stack (size);
 
@@ -1769,6 +1796,219 @@  probe_stack_range (HOST_WIDE_INT first, rtx size)
   emit_insn (gen_blockage ());
 }
 
+/* Compute parameters for stack clash probing a dynamic stack
+   allocation of SIZE bytes.
+
+   We compute ROUNDED_SIZE, LAST_ADDR, RESIDUAL and PROBE_INTERVAL.
+
+   Additionally we conditionally dump the type of probing that will
+   be needed given the values computed.  */
+
+void
+compute_stack_clash_protection_loop_data (rtx *rounded_size, rtx *last_addr,
+					  rtx *residual,
+					  HOST_WIDE_INT *probe_interval,
+					  rtx size)
+{
+  /* Round SIZE down to a multiple of STACK_CLASH_PROTECTION_PROBE_INTERVAL.  */
+  *probe_interval
+    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+  *rounded_size = simplify_gen_binary (AND, Pmode, size,
+				        GEN_INT (-*probe_interval));
+
+  /* Compute the value of the stack pointer for the last iteration.
+     It's just SP + ROUNDED_SIZE.  */
+  rtx rounded_size_op = force_operand (*rounded_size, NULL_RTX);
+  *last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
+					      stack_pointer_rtx,
+					      rounded_size_op),
+			      NULL_RTX);
+
+  /* Compute any residuals not allocated by the loop above.  Residuals
+     are just SIZE - ROUNDED_SIZE.  */
+  *residual = simplify_gen_binary (MINUS, Pmode, size, *rounded_size);
+
+  /* Dump key information to make writing tests easy.  */
+  if (dump_file)
+    {
+      if (*rounded_size == CONST0_RTX (Pmode))
+	fprintf (dump_file,
+		 "Stack clash skipped dynamic allocation and probing loop.\n");
+      else if (GET_CODE (*rounded_size) == CONST_INT
+	       && INTVAL (*rounded_size) <= 4 * *probe_interval)
+	fprintf (dump_file,
+		 "Stack clash dynamic allocation and probing inline.\n");
+      else if (GET_CODE (*rounded_size) == CONST_INT)
+	fprintf (dump_file,
+		 "Stack clash dynamic allocation and probing in "
+		 "rotated loop.\n");
+      else
+	fprintf (dump_file,
+		 "Stack clash dynamic allocation and probing in loop.\n");
+
+      if (*residual != CONST0_RTX (Pmode))
+	fprintf (dump_file,
+		 "Stack clash dynamic allocation and probing residuals.\n");
+      else
+	fprintf (dump_file,
+		 "Stack clash skipped dynamic allocation and "
+		 "probing residuals.\n");
+    }
+}
+
+/* Emit the start of an allocate/probe loop for stack
+   clash protection.
+
+   LOOP_LAB and END_LAB are returned for use when we emit the
+   end of the loop.
+
+   LAST_ADDR is the value for SP which stops the loop.  */
+void
+emit_stack_clash_protection_probe_loop_start (rtx *loop_lab,
+					      rtx *end_lab,
+					      rtx last_addr,
+					      bool rotated)
+{
+  /* Essentially we want to emit any setup code, the top of loop
+     label and the comparison at the top of the loop.  */
+  *loop_lab = gen_label_rtx ();
+  *end_lab = gen_label_rtx ();
+
+  emit_label (*loop_lab);
+  if (!rotated)
+    emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, EQ, NULL_RTX,
+			     Pmode, 1, *end_lab);
+}
+
+/* Emit the end of a stack clash probing loop.
+
+   This consists of just the jump back to LOOP_LAB and
+   emitting END_LOOP after the loop.  */
+
+void
+emit_stack_clash_protection_probe_loop_end (rtx loop_lab, rtx end_loop,
+					    rtx last_addr, bool rotated)
+{
+  if (rotated)
+    emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, NE, NULL_RTX,
+			     Pmode, 1, loop_lab);
+  else
+    emit_jump (loop_lab);
+
+  emit_label (end_loop);
+
+}
+
+/* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes)
+   while probing it.  This pushes when SIZE is positive.  SIZE need not
+   be constant.
+
+   This is subtly different from anti_adjust_stack_and_probe, in order
+   to prevent stack-clash attacks:
+
+     1. It must assume no knowledge of the probing state, any allocation
+	must probe.
+
+	Consider the case of a 1 byte alloca in a loop.  If the sum of the
+	allocations is large, then this could be used to jump the guard if
+	probes were not emitted.
+
+     2. It never skips probes, whereas anti_adjust_stack_and_probe will
+	skip probes on the first couple PROBE_INTERVALs on the assumption
+	they're done elsewhere.
+
+     3. It only allocates and probes SIZE bytes; it does not need to
+	allocate/probe beyond that because this probing style does not
+	guarantee signal handling capability if the guard is hit.  */
+
+static void
+anti_adjust_stack_and_probe_stack_clash (rtx size)
+{
+  /* First ensure SIZE is Pmode.  */
+  if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
+    size = convert_to_mode (Pmode, size, 1);
+
+  /* We can get here with a constant size on some targets.  */
+  rtx rounded_size, last_addr, residual;
+  HOST_WIDE_INT probe_interval;
+  compute_stack_clash_protection_loop_data (&rounded_size, &last_addr,
+					    &residual, &probe_interval, size);
+
+  if (rounded_size != CONST0_RTX (Pmode))
+    {
+	  if (CONST_INT_P (rounded_size) && INTVAL (rounded_size) <= 4 * probe_interval)
+	{
+	  for (HOST_WIDE_INT i = 0;
+	       i < INTVAL (rounded_size);
+	       i += probe_interval)
+	    {
+	      anti_adjust_stack (GEN_INT (probe_interval));
+
+	      /* The prologue does not probe residuals.  Thus the offset
+		 here is to probe just beyond what the prologue had already
+		 allocated.  */
+	      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+					       (probe_interval
+						- GET_MODE_SIZE (word_mode))));
+	      emit_insn (gen_blockage ());
+	    }
+	}
+      else
+	{
+	  rtx loop_lab, end_loop;
+	  bool rotate_loop = GET_CODE (rounded_size) == CONST_INT;
+	  emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
+							last_addr, rotate_loop);
+
+	  anti_adjust_stack (GEN_INT (probe_interval));
+
+	  /* The prologue does not probe residuals.  Thus the offset here
+	     is to probe just beyond what the prologue had already allocated.  */
+	  emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+					   (probe_interval
+					    - GET_MODE_SIZE (word_mode))));
+
+	  emit_stack_clash_protection_probe_loop_end (loop_lab, end_loop,
+						      last_addr, rotate_loop);
+	  emit_insn (gen_blockage ());
+	}
+    }
+
+  if (residual != CONST0_RTX (Pmode))
+    {
+      rtx x = force_reg (Pmode, plus_constant (Pmode, residual,
+					       -GET_MODE_SIZE (word_mode)));
+      anti_adjust_stack (residual);
+      emit_stack_probe (gen_rtx_PLUS (Pmode, stack_pointer_rtx, x));
+      emit_insn (gen_blockage ());
+    }
+
+  /* Some targets make optimistic assumptions in their prologues about
+     how the caller may have probed the stack.  Make sure we honor
+     those assumptions when needed.  */
+  if (size != CONST0_RTX (Pmode)
+      && targetm.stack_clash_protection_final_dynamic_probe (residual))
+    {
+      /* Ideally we would just probe at *sp.  However, if SIZE is not
+	 a compile-time constant, but is zero at runtime, then *sp
+	 might hold live data.  So probe at *sp if we know that
+	 an allocation was made, otherwise probe into the red zone,
+	 which is obviously undesirable.  */
+      if (GET_CODE (size) == CONST_INT)
+	{
+	  emit_stack_probe (stack_pointer_rtx);
+	  emit_insn (gen_blockage ());
+	}
+      else
+	{
+	  emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+					   -GET_MODE_SIZE (word_mode)));
+	  emit_insn (gen_blockage ());
+	}
+    }
+}
+
+
 /* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes)
    while probing it.  This pushes when SIZE is positive.  SIZE need not
    be constant.  If ADJUST_BACK is true, adjust back the stack pointer
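
The "1 byte alloca in a loop" case mentioned in the comment above is
worth seeing in source form.  The snippet below is illustrative only
(use() is an invented stand-in): without the rule that every
allocation probes, an attacker-controlled iteration count could walk
the stack pointer across the guard page without ever touching it.

  extern void use (char *);

  void
  alloca_in_loop (unsigned n)
  {
    /* Each 1-byte allocation persists until function exit, so SP
       creeps downward a little per iteration; probing each
       allocation is what keeps it from silently stepping past the
       guard page.  */
    for (unsigned i = 0; i < n; i++)
      use (__builtin_alloca (1));
  }
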
diff --git a/gcc/explow.h b/gcc/explow.h
index 217a3226adb..b85c051e8ce 100644
--- a/gcc/explow.h
+++ b/gcc/explow.h
@@ -69,6 +69,15 @@  extern void anti_adjust_stack (rtx);
 /* Add some bytes to the stack while probing it.  An rtx says how many. */
 extern void anti_adjust_stack_and_probe (rtx, bool);
 
+/* Support for building allocation/probing loops for stack-clash
+   protection of dynamically allocated stack space.  */
+extern void compute_stack_clash_protection_loop_data (rtx *, rtx *, rtx *,
+						      HOST_WIDE_INT *, rtx);
+extern void emit_stack_clash_protection_probe_loop_start (rtx *, rtx *,
+							  rtx, bool);
+extern void emit_stack_clash_protection_probe_loop_end (rtx, rtx,
+							rtx, bool);
+
 /* This enum is used for the following two functions.  */
 enum save_level {SAVE_BLOCK, SAVE_FUNCTION, SAVE_NONLOCAL};
 
diff --git a/gcc/rtl.h b/gcc/rtl.h
index a450a04c237..a63f33e747a 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -2722,6 +2722,7 @@  get_full_set_src_cost (rtx x, machine_mode mode, struct full_rtx_costs *c)
 /* In explow.c */
 extern HOST_WIDE_INT trunc_int_for_mode	(HOST_WIDE_INT, machine_mode);
 extern rtx plus_constant (machine_mode, rtx, HOST_WIDE_INT, bool = false);
+extern HOST_WIDE_INT get_stack_check_protect (void);
 
 /* In rtl.c */
 extern rtx rtx_alloc (RTX_CODE CXX_MEM_STAT_INFO);
diff --git a/gcc/target.def b/gcc/target.def
index f4c35763686..ae22d7a6183 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -5735,6 +5735,13 @@  these registers when the target switches are opposed to them.)",
  void, (void),
  hook_void_void)
 
+DEFHOOK
+(stack_clash_protection_final_dynamic_probe,
+ "Some targets make optimistic assumptions about the state of stack probing when they emit their prologues.  On such targets a probe into the end of any dynamically allocated space is likely required for safety against stack clash style attacks.  Define this variable to return nonzero if such a probe is required or zero otherwise.  You need not define this macro if it would always have the value zero.",
+ bool, (rtx residual),
+ default_stack_clash_protection_final_dynamic_probe)
+
+
 /* Functions specific to the C family of frontends.  */
 #undef HOOK_PREFIX
 #define HOOK_PREFIX "TARGET_C_"
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 4bd4833dfdd..d87d6c7f430 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -2207,4 +2207,10 @@  default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
   return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
 }
 
+bool
+default_stack_clash_protection_final_dynamic_probe (rtx residual ATTRIBUTE_UNUSED)
+{
+  return 0;
+}
+
 #include "gt-targhooks.h"
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index 6d78508c8a2..a70992d034e 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -271,5 +271,6 @@  extern unsigned int default_min_arithmetic_precision (void);
 
 extern enum flt_eval_method
 default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED);
+extern bool default_stack_clash_protection_final_dynamic_probe (rtx);
 
 #endif /* GCC_TARGHOOKS_H */
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index a77b54604e6..221956496ae 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,7 @@ 
 2017-09-19  Jeff Law  <law@redhat.com>
 
+	* gcc.dg/stack-check-3.c: New test.
+
 	* gcc.dg/stack-check-2.c: New test.
 	* lib/target-supports.exp
 	(check_effective_target_supports_stack_clash_protection): New function.
diff --git a/gcc/testsuite/gcc.dg/stack-check-3.c b/gcc/testsuite/gcc.dg/stack-check-3.c
new file mode 100644
index 00000000000..58fb65649ee
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/stack-check-3.c
@@ -0,0 +1,86 @@ 
+/* The goal here is to ensure that dynamic allocations via vlas or
+   alloca calls receive probing.
+
+   Scanning the RTL or assembly code seems like insanity here as does
+   checking for particular allocation sizes and probe offsets.  For
+   now we just verify that there's an allocation + probe loop and
+   residual allocation + probe for f?.  */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -fdump-rtl-expand -fno-optimize-sibling-calls --param stack-clash-protection-probe-interval=4096 --param stack-clash-protection-guard-size=4096" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+__attribute__((noinline, noclone)) void
+foo (char *p)
+{
+  asm volatile ("" : : "r" (p) : "memory");
+}
+
+/* Simple VLA, no other locals. */
+__attribute__((noinline, noclone)) void
+f0 (int x)
+{
+  char vla[x];
+  foo (vla);
+}
+
+/* Simple VLA, small local frame.  */
+__attribute__((noinline, noclone)) void
+f1 (int x)
+{
+  char locals[128];
+  char vla[x];
+  foo (vla);
+}
+
+/* Small constant alloca, no other locals. */
+__attribute__((noinline, noclone)) void
+f2 (int x)
+{
+  char *vla = __builtin_alloca (128);
+  foo (vla);
+}
+
+/* Big constant alloca, small local frame.  */
+__attribute__((noinline, noclone)) void
+f3 (int x)
+{
+  char locals[128];
+  char *vla = __builtin_alloca (16384);
+  foo (vla);
+}
+
+/* Big constant alloca, small local frame.  */
+__attribute__((noinline, noclone)) void
+f3a (int x)
+{
+  char locals[128];
+  char *vla = __builtin_alloca (32768);
+  foo (vla);
+}
+
+/* Nonconstant alloca, no other locals. */
+__attribute__((noinline, noclone)) void
+f4 (int x)
+{
+  char *vla = __builtin_alloca (x);
+  foo (vla);
+}
+
+/* Nonconstant alloca, small local frame.  */
+__attribute__((noinline, noclone)) void
+f5 (int x)
+{
+  char locals[128];
+  char *vla = __builtin_alloca (x);
+  foo (vla);
+}
+
+/* { dg-final { scan-rtl-dump-times "allocation and probing residuals" 7 "expand" } } */
+
+
+/* { dg-final { scan-rtl-dump-times "allocation and probing in loop" 7 "expand" { target callee_realigns_stack } } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing in loop" 4 "expand" { target { ! callee_realigns_stack } } } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing in rotated loop" 1 "expand" { target { ! callee_realigns_stack } } } } */
+/* { dg-final { scan-rtl-dump-times "allocation and probing inline" 1 "expand" { target { ! callee_realigns_stack } } } } */
+/* { dg-final { scan-rtl-dump-times "skipped dynamic allocation and probing loop" 1 "expand" { target { ! callee_realigns_stack } } } } */

commit 74af142db7e145d887fc63f9beb342027095e285
Author: law <law@138bc75d-0d04-0410-961f-82ee72b054a4>
Date:   Wed Sep 20 04:56:54 2017 +0000

            * common.opt (-fstack-clash-protection): New option.
            * flag-types.h (enum stack_check_type): Note difference between
            -fstack-check= and -fstack-clash-protection.
            * params.def (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE): New PARAM.
            (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL): Likewise.
            * toplev.c (process_options): Issue warnings/errors for cases
            not handled with -fstack-clash-protection.
            * doc/invoke.texi (-fstack-clash-protection): Document new option.
            (-fstack-check): Note additional problem with -fstack-check=generic.
            Note that -fstack-check is primarily for Ada and refer users
            to -fstack-clash-protection for stack-clash protection.
            Document new params for stack clash protection.
    
            * gcc.dg/stack-check-2.c: New test.
            * lib/target-supports.exp
            (check_effective_target_supports_stack_clash_protection): New function.
            (check_effective_target_frame_pointer_for_non_leaf): Likewise.
            (check_effective_target_caller_implicit_probes): Likewise.
    
    git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@252994 138bc75d-0d04-0410-961f-82ee72b054a4

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 9576c8c4f47..a1aa3f6327d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,18 @@ 
+2017-09-19  Jeff Law  <law@redhat.com>
+
+	* common.opt (-fstack-clash-protection): New option.
+	* flag-types.h (enum stack_check_type): Note difference between
+	-fstack-check= and -fstack-clash-protection.
+	* params.def (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE): New PARAM.
+	(PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL): Likewise.
+	* toplev.c (process_options): Issue warnings/errors for cases
+	not handled with -fstack-clash-protection.
+	* doc/invoke.texi (-fstack-clash-protection): Document new option.
+	(-fstack-check): Note additional problem with -fstack-check=generic.
+	Note that -fstack-check is primarily for Ada and refer users
+	to -fstack-clash-protection for stack-clash protection.
+	Document new params for stack clash protection.
+
 2017-09-19  Uros Bizjak  <ubizjak@gmail.com>
 
 	* config/i386/i386.md (*scc_bt<mode>): New insn_and_split pattern.
diff --git a/gcc/common.opt b/gcc/common.opt
index fa6dd845d54..f22661cfbba 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -2320,13 +2320,18 @@  Common Report Var(flag_variable_expansion_in_unroller) Optimization
 Apply variable expansion when loops are unrolled.
 
 fstack-check=
-Common Report RejectNegative Joined
+Common Report RejectNegative Joined Optimization
 -fstack-check=[no|generic|specific]	Insert stack checking code into the program.
 
 fstack-check
 Common Alias(fstack-check=, specific, no)
 Insert stack checking code into the program.  Same as -fstack-check=specific.
 
+fstack-clash-protection
+Common Report Var(flag_stack_clash_protection) Optimization
+Insert code to probe each page of stack space as it is allocated to protect
+from stack-clash style attacks.
+
 fstack-limit
 Common Var(common_deferred_options) Defer
 
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 204c9b77b61..f0f9559b024 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10187,6 +10187,21 @@  compilation without.  The value for compilation with profile feedback
 needs to be more conservative (higher) in order to make tracer
 effective.
 
+@item stack-clash-protection-guard-size
+Specify the size of the operating system provided stack guard as
+2 raised to @var{num} bytes.  The default value is 12 (4096 bytes).
+Acceptable values are between 12 and 30.  Higher values may reduce the
+number of explicit probes, but a value larger than the operating system
+provided guard will leave code vulnerable to stack clash style attacks.
+
+@item stack-clash-protection-probe-interval
+Stack clash protection involves probing stack space as it is allocated.  This
+param controls the maximum distance between probes into the stack as 2 raised
+to @var{num} bytes.  Acceptable values are between 10 and 16; the default is
+12.  Higher values may reduce the number of explicit probes, but a value
+larger than the operating system provided guard will leave code vulnerable to
+stack clash style attacks.
+
 @item max-cse-path-length
 
 The maximum number of basic blocks on path that CSE considers.
@@ -11412,7 +11427,8 @@  target support in the compiler but comes with the following drawbacks:
 @enumerate
 @item
 Modified allocation strategy for large objects: they are always
-allocated dynamically if their size exceeds a fixed threshold.
+allocated dynamically if their size exceeds a fixed threshold.  Note this
+may change the semantics of some code.
 
 @item
 Fixed limit on the size of the static frame of functions: when it is
@@ -11427,6 +11443,25 @@  generic implementation, code performance is hampered.
 Note that old-style stack checking is also the fallback method for
 @samp{specific} if no target support has been added in the compiler.
 
+@samp{-fstack-check=} is designed for Ada's needs to detect infinite recursion
+and stack overflows.  @samp{specific} is an excellent choice when compiling
+Ada code.  It is not generally sufficient to protect against stack-clash
+attacks.  To protect against those, use @samp{-fstack-clash-protection}.
+
+@item -fstack-clash-protection
+@opindex fstack-clash-protection
+Generate code to prevent stack clash style attacks.  When this option is
+enabled, the compiler will only allocate one page of stack space at a time
+and each page is accessed immediately after allocation.  Thus, it prevents
+allocations from jumping over any stack guard page provided by the
+operating system.
+
+Most targets do not fully support stack clash protection.  However, on
+those targets @option{-fstack-clash-protection} will protect dynamic stack
+allocations.  @option{-fstack-clash-protection} may also provide limited
+protection for static stack allocations if the target supports
+@option{-fstack-check=specific}.
+
 @item -fstack-limit-register=@var{reg}
 @itemx -fstack-limit-symbol=@var{sym}
 @itemx -fno-stack-limit
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index 4938f69b1b6..1f439d35b07 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -166,7 +166,14 @@  enum permitted_flt_eval_methods
   PERMITTED_FLT_EVAL_METHODS_C11
 };
 
-/* Type of stack check.  */
+/* Type of stack check.
+
+   Stack checking is designed to detect infinite recursion and stack
+   overflows for Ada programs.  Furthermore, in that scenario, stack checking
+   tries to ensure that enough stack space is left to run a signal handler.
+
+   -fstack-check= does not prevent stack-clash style attacks.  For that
+   you want -fstack-clash-protection.  */
 enum stack_check_type
 {
   /* Do not check the stack.  */
diff --git a/gcc/params.def b/gcc/params.def
index 805302bb93e..860e79e3209 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -213,6 +213,16 @@  DEFPARAM(PARAM_STACK_FRAME_GROWTH,
 	 "Maximal stack frame growth due to inlining (in percent).",
 	 1000, 0, 0)
 
+DEFPARAM(PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
+	 "stack-clash-protection-guard-size",
+	 "Size of the stack guard expressed as a power of two.",
+	 12, 12, 30)
+
+DEFPARAM(PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
+	 "stack-clash-protection-probe-interval",
+	 "Interval in which to probe the stack expressed as a power of two.",
+	 12, 10, 16)
+
 /* The GCSE optimization will be disabled if it would require
    significantly more memory than this value.  */
 DEFPARAM(PARAM_MAX_GCSE_MEMORY,
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 011420575b2..a77b54604e6 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,11 @@ 
+2017-09-19  Jeff Law  <law@redhat.com>
+
+	* gcc.dg/stack-check-2.c: New test.
+	* lib/target-supports.exp
+	(check_effective_target_supports_stack_clash_protection): New function.
+	(check_effective_target_frame_pointer_for_non_leaf): Likewise.
+	(check_effective_target_caller_implicit_probes): Likewise.
+
 2017-09-19  Uros Bizjak  <ubizjak@gmail.com>
 
 	* gcc.target/i386/bt-5.c: New test.
diff --git a/gcc/testsuite/gcc.dg/stack-check-2.c b/gcc/testsuite/gcc.dg/stack-check-2.c
new file mode 100644
index 00000000000..196c4bbfbdd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/stack-check-2.c
@@ -0,0 +1,66 @@ 
+/* The goal here is to ensure that we never consider a call to a noreturn
+   function as a potential tail call.
+
+   Right now GCC discovers potential tail calls by looking at the
+   predecessors of the exit block.  A call to a noreturn function
+   has no successors and thus can never match that first filter.
+
+   But that could change one day and we want to catch it.  The problem
+   is that the compiler could potentially optimize a tail call to a
+   noreturn function, even if the caller has a frame.  That would break
+   the assumption, which some targets rely on to elide stack probes,
+   that calls probe *sp when saving the return address.  */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -fdump-tree-tailc -fdump-tree-optimized" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+extern void foo (void) __attribute__ ((__noreturn__));
+
+
+void
+test_direct_1 (void)
+{
+  foo ();
+}
+
+void
+test_direct_2 (void)
+{
+  return foo ();
+}
+
+void (*indirect)(void) __attribute__ ((noreturn));
+
+
+void
+test_indirect_1 ()
+{
+  (*indirect)();
+}
+
+void
+test_indirect_2 (void)
+{
+  return (*indirect)();
+}
+
+
+typedef void (*pvfn)() __attribute__ ((noreturn));
+
+void (*indirect_casted)(void);
+
+void
+test_indirect_casted_1 ()
+{
+  (*(pvfn)indirect_casted)();
+}
+
+void
+test_indirect_casted_2 (void)
+{
+  return (*(pvfn)indirect_casted)();
+}
+/* { dg-final { scan-tree-dump-not "tail call" "tailc" } } */
+/* { dg-final { scan-tree-dump-not "tail call" "optimized" } } */
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 6ea71222c6e..feed524680d 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -8621,3 +8621,80 @@  proc check_effective_target_autoincdec { } {
     }
     return 0
 }
+
+# Return 1 if the target has support for stack probing designed
+# to avoid stack-clash style attacks.
+#
+# This is used to restrict the stack-clash mitigation tests to
+# just those targets that have been explicitly supported.
+# 
+# In addition to the prologue work on those targets, each target's
+# properties should be described in the functions below so that
+# tests do not become a mess of unreadable target conditions.
+# 
+proc check_effective_target_supports_stack_clash_protection { } {
+
+   # Temporary until the target bits are fully ACK'd.
+#  if { [istarget aarch*-*-*] || [istarget x86_64-*-*]
+#       || [istarget i?86-*-*] || [istarget s390*-*-*]
+#       || [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+#	return 1
+#  }
+  return 0
+}
+
+# Return 1 if the target creates a frame pointer for non-leaf functions
+# Note we ignore cases where we apply tail call optimization here.
+proc check_effective_target_frame_pointer_for_non_leaf { } {
+  if { [istarget aarch*-*-*] } {
+	return 1
+  }
+  return 0
+}
+
+# Return 1 if the target's calling sequence or its ABI
+# create implicit stack probes at or prior to function entry.
+proc check_effective_target_caller_implicit_probes { } {
+
+  # On x86/x86_64 the call instruction itself pushes the return
+  # address onto the stack.  That is an implicit probe of *sp.
+  if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+	return 1
+  }
+
+  # On PPC, the ABI mandates that the address of the outer
+  # frame be stored at *sp.  Thus each allocation of stack
+  # space is itself an implicit probe of *sp.
+  if { [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+	return 1
+  }
+
+  # s390's ABI has a register save area allocated by the
+  # caller for use by the callee.  Its mere existence does
+  # not constitute a probe by the caller, but when the slots are
+  # used by the callee, those stores are implicit probes.
+  if { [istarget s390*-*-*] } {
+	return 1
+  }
+
+  # Not strictly true on aarch64, but we have agreed that we will
+  # consider any function that pushes SP more than 3kbytes into
+  # the guard page as broken.  This essentially means that we can
+  # consider the aarch64 as having a caller implicit probe at
+  # *(sp + 1k).
+  if { [istarget aarch64*-*-*] } {
+	return 1;
+  }
+
+  return 0
+}
+
+# Targets that potentially realign the stack pointer often cause residual
+# stack allocations and make it difficult to eliminate probing loops or
+# residual allocations for dynamic stack allocations.
+proc check_effective_target_callee_realigns_stack { } {
+  if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+	return 1
+  }
+  return 0
+}
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 7d2b8fffa0b..6f48e10850d 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -1605,6 +1605,26 @@  process_options (void)
       flag_associative_math = 0;
     }
 
+  /* -fstack-clash-protection is not currently supported on targets
+     where the stack grows up.  */
+  if (flag_stack_clash_protection && !STACK_GROWS_DOWNWARD)
+    {
+      warning_at (UNKNOWN_LOCATION, 0,
+		  "%<-fstack-clash-protection%> is not supported on targets "
+		  "where the stack grows from lower to higher addresses");
+      flag_stack_clash_protection = 0;
+    }
+
+  /* We cannot support -fstack-check= and -fstack-clash-protection at
+     the same time.  */
+  if (flag_stack_check != NO_STACK_CHECK && flag_stack_clash_protection)
+    {
+      warning_at (UNKNOWN_LOCATION, 0,
+		  "%<-fstack-check=%> and %<-fstack-clash_protection%> are "
+		  "mutually exclusive.  Disabling %<-fstack-check=%>");
+      flag_stack_check = NO_STACK_CHECK;
+    }
+
   /* With -fcx-limited-range, we do cheap and quick complex arithmetic.  */
   if (flag_cx_limited_range)
     flag_complex_method = 0;