@@ -2306,6 +2306,11 @@ fstack-check
Common Alias(fstack-check=, specific, no)
Insert stack checking code into the program. Same as -fstack-check=specific.
+fstack-clash-protection
+Common Report Var(flag_stack_clash_protection)
+Insert code to probe each page of stack space as it is allocated to protect
+from stack-clash style attacks.
+
fstack-limit
Common Var(common_deferred_options) Defer
@@ -11333,7 +11333,8 @@ target support in the compiler but comes with the following drawbacks:
@enumerate
@item
Modified allocation strategy for large objects: they are always
-allocated dynamically if their size exceeds a fixed threshold.
+allocated dynamically if their size exceeds a fixed threshold. Note this
+may change the semantics of some code.
@item
Fixed limit on the size of the static frame of functions: when it is
@@ -11348,6 +11349,19 @@ generic implementation, code performance is hampered.
Note that old-style stack checking is also the fallback method for
@samp{specific} if no target support has been added in the compiler.
+@samp{-fstack-check=} is designed for Ada's needs to detect infinite recursion
+and stack overflows. @samp{specific} is an excellent choice when compiling
+Ada code. It is not generally sufficient to protect against stack-clash
+attacks. To protect against those you want @samp{-fstack-clash-protection}.
+
+@item -fstack-clash-protection
+@opindex fstack-clash-protection
+Generate code to prevent stack clash style attacks. When this option is
+enabled, the compiler will only allocate one page of stack space at a time
+and each page is accessed immediately after allocation. Thus, it prevents
+allocations from jumping over any stack guard page provided by the
+operating system.
+
@item -fstack-limit-register=@var{reg}
@itemx -fstack-limit-symbol=@var{sym}
@itemx -fno-stack-limit
@@ -166,7 +166,14 @@ enum permitted_flt_eval_methods
PERMITTED_FLT_EVAL_METHODS_C11
};
-/* Type of stack check. */
+/* Type of stack check.
+
+ Stack checking is designed to detect infinite recursion for Ada
+   programs.  Furthermore stack checking tries to ensure that in that
+   scenario enough stack space is left to run a signal handler.
+
+ -fstack-check= does not prevent stack-clash style attacks. For that
+ you want -fstack-clash-protection. */
enum stack_check_type
{
/* Do not check the stack. */
new file mode 100644
@@ -0,0 +1,66 @@
+/* The goal here is to ensure that we never consider a call to a noreturn
+ function as a potential tail call.
+
+ Right now GCC discovers potential tail calls by looking at the
+ predecessors of the exit block. A call to a non-return function
+ has no successors and thus can never match that first filter.
+
+ But that could change one day and we want to catch it. The problem
+ is the compiler could potentially optimize a tail call to a nonreturn
+ function, even if the caller has a frame. That breaks the assumption
+ that calls probe *sp when saving the return address that some targets
+ depend on to elide stack probes. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstack-clash-protection -fdump-tree-tailc -fdump-tree-optimized" } */
+/* { dg-require-effective-target supports_stack_clash_protection } */
+
+extern void foo (void) __attribute__ ((__noreturn__));
+
+
+void
+test_direct_1 (void)
+{
+ foo ();
+}
+
+void
+test_direct_2 (void)
+{
+ return foo ();
+}
+
+void (*indirect)(void)__attribute__ ((noreturn));
+
+
+void
+test_indirect_1 ()
+{
+ (*indirect)();
+}
+
+void
+test_indirect_2 (void)
+{
+  return (*indirect)();
+}
+
+
+typedef void (*pvfn)() __attribute__ ((noreturn));
+
+void (*indirect_casted)(void);
+
+void
+test_indirect_casted_1 ()
+{
+ (*(pvfn)indirect_casted)();
+}
+
+void
+test_indirect_casted_2 (void)
+{
+ return (*(pvfn)indirect_casted)();
+}
+/* { dg-final { scan-tree-dump-not "tail call" "tailc" } } */
+/* { dg-final { scan-tree-dump-not "tail call" "optimized" } } */
+
@@ -8468,3 +8468,78 @@ proc check_effective_target_arm_coproc4_ok { } {
return [check_cached_effective_target arm_coproc4_ok \
check_effective_target_arm_coproc4_ok_nocache]
}
+
+# Return 1 if the target has support for stack probing designed
+# to avoid stack-clash style attacks.
+#
+# This is used to restrict the stack-clash mitigation tests to
+# just those targets that have been explicitly supported.
+#
+# In addition to the prologue work on those targets, each target's
+# properties should be described in the functions below so that
+# tests do not become a mess of unreadable target conditions.
+#
+proc check_effective_target_supports_stack_clash_protection { } {
+ if { [istarget aarch*-*-*] || [istarget x86_64-*-*]
+ || [istarget i?86-*-*] || [istarget s390*-*-*]
+ || [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target creates a frame pointer for non-leaf functions
+# Note we ignore cases where we apply tail call optimization here.
+proc check_effective_target_frame_pointer_for_non_leaf { } {
+ if { [istarget aarch*-*-*] } {
+ return 1
+ }
+ return 0
+}
+
+# Return 1 if the target's calling sequence or its ABI
+# create implicit stack probes at or prior to function entry.
+proc check_effective_target_caller_implicit_probes { } {
+
+ # On x86/x86_64 the call instruction itself pushes the return
+ # address onto the stack. That is an implicit probe of *sp.
+ if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+ return 1
+ }
+
+ # On PPC, the ABI mandates that the address of the outer
+ # frame be stored at *sp. Thus each allocation of stack
+ # space is itself an implicit probe of *sp.
+ if { [istarget powerpc*-*-*] || [istarget rs6000*-*-*] } {
+ return 1
+ }
+
+ # s390's ABI has a register save area allocated by the
+ # caller for use by the callee. The mere existence does
+    # not constitute a probe by the caller, but when the slots are
+    # used by the callee those stores are implicit probes.
+ if { [istarget s390*-*-*] } {
+ return 1
+ }
+
+ # Not strictly true on aarch64, but we have agreed that we will
+ # consider any function that pushes SP more than 3kbytes into
+ # the guard page as broken. This essentially means that we can
+ # consider the aarch64 as having a caller implicit probe at
+ # *(sp + 1k).
+ if { [istarget aarch64*-*-*] } {
+ return 1;
+ }
+
+ return 0
+}
+
+# Targets that potentially realign the stack pointer often cause residual
+# stack allocations and make it difficult to eliminate loops or residual
+# allocations for dynamic stack allocations
+proc check_effective_target_callee_realigns_stack { } {
+ if { [istarget x86_64-*-*] || [istarget i?86-*-*] } {
+ return 1
+ }
+ return 0
+}
@@ -1591,6 +1591,26 @@ process_options (void)
flag_associative_math = 0;
}
+ /* -fstack-clash-protection is not currently supported on targets
+ where the stack grows up. */
+ if (flag_stack_clash_protection && !STACK_GROWS_DOWNWARD)
+ {
+ warning_at (UNKNOWN_LOCATION, 0,
+                  "-fstack-clash-protection is not supported on targets "
+ "where the stack grows from lower to higher addresses");
+ flag_stack_clash_protection = 0;
+ }
+
+ /* We can not support -fstack-check= and -fstack-clash-protection at
+ the same time. */
+ if (flag_stack_check != NO_STACK_CHECK && flag_stack_clash_protection)
+ {
+ warning_at (UNKNOWN_LOCATION, 0,
+                  "-fstack-check= and -fstack-clash-protection are mutually "
+ "exclusive. Disabling -fstack-check=");
+ flag_stack_check = NO_STACK_CHECK;
+ }
+
/* With -fcx-limited-range, we do cheap and quick complex arithmetic. */
if (flag_cx_limited_range)
flag_complex_method = 0;