@@ -99,4 +99,10 @@ enum aarch64_function_type {
AARCH64_FUNCTION_ALL
};
+/* GCC standard stack protector (Canary insertion based) types for AArch64. */
+enum aarch64_stack_protector_type {
+ STACK_PROTECTOR_TRAD,
+ STACK_PROTECTOR_PAUTH
+};
+
#endif
@@ -982,4 +982,25 @@ enum aarch64_pauth_action_type
AARCH64_PAUTH_AUTH
};
+/* Pointer authentication accelerated -fstack-protector. */
+#define AARCH64_PAUTH_SSP_OPTION \
+ (TARGET_PAUTH && aarch64_stack_protector_dialect == STACK_PROTECTOR_PAUTH)
+
+#define AARCH64_PAUTH_SSP \
+ (crtl->stack_protect_guard && AARCH64_PAUTH_SSP_OPTION)
+
+#define AARCH64_PAUTH_SSP_OR_RA_SIGN \
+ (AARCH64_PAUTH_SSP || AARCH64_ENABLE_RETURN_ADDRESS_SIGN)
+
+#ifndef TARGET_LIBC_PROVIDES_SSP
+#define LINK_SSP_SPEC "%{!mstack-protector-dialect=pauth:\
+ %{fstack-protector|fstack-protector-all\
+ |fstack-protector-strong|fstack-protector-explicit:\
+ -lssp_nonshared -lssp}}"
+#endif
+
+/* Don't use the GCC default SSP runtime when pointer authentication
+   acceleration is enabled.  */
+#define ENABLE_DEFAULT_SSP_RUNTIME (!(AARCH64_PAUTH_SSP_OPTION))
+
#endif /* GCC_AARCH64_H */
@@ -2993,6 +2993,15 @@ aarch64_layout_frame (void)
= cfun->machine->frame.frame_size - cfun->machine->frame.initial_adjust;
}
+ if (AARCH64_PAUTH_SSP)
+ {
+ cfun->machine->frame.callee_adjust = varargs_and_saved_regs_size;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.callee_adjust;
+ cfun->machine->frame.hard_fp_offset = cfun->machine->frame.callee_adjust;
+ cfun->machine->frame.locals_offset = cfun->machine->frame.hard_fp_offset;
+ }
+
cfun->machine->frame.laid_out = true;
}
@@ -3203,7 +3212,7 @@ aarch64_save_callee_saves (machine_mode mode, HOST_WIDE_INT start_offset,
RTX_FRAME_RELATED_P (insn) = 1;
- if (AARCH64_ENABLE_RETURN_ADDRESS_SIGN && lr_pair_reg != INVALID_REGNUM)
+ if (AARCH64_PAUTH_SSP_OR_RA_SIGN && lr_pair_reg != INVALID_REGNUM)
{
rtx cfi_ops = NULL_RTX;
@@ -3335,7 +3344,7 @@ aarch64_expand_prologue (void)
/* Do return address signing for all functions, even those for which LR is not
pushed onto stack. */
- if (AARCH64_ENABLE_RETURN_ADDRESS_SIGN)
+ if (AARCH64_PAUTH_SSP_OR_RA_SIGN)
{
insn = emit_insn (gen_sign_reg (gen_rtx_REG (Pmode, LR_REGNUM),
gen_rtx_REG (Pmode, LR_REGNUM),
@@ -3368,7 +3377,7 @@ aarch64_expand_prologue (void)
aarch64_push_regs (reg1, reg2, callee_adjust);
/* Generate return address signing dwarf annotation when
omit-frame-pointer. */
- if (AARCH64_ENABLE_RETURN_ADDRESS_SIGN
+ if (AARCH64_PAUTH_SSP_OR_RA_SIGN
&& (reg1 == LR_REGNUM || reg2 == LR_REGNUM))
{
rtx cfi_ops = NULL_RTX;
@@ -3503,7 +3512,7 @@ aarch64_expand_epilogue (bool for_sibcall)
rtx new_cfa = plus_constant (Pmode, stack_pointer_rtx, initial_adjust);
cfi_ops = alloc_reg_note (REG_CFA_DEF_CFA, new_cfa, cfi_ops);
- if (AARCH64_ENABLE_RETURN_ADDRESS_SIGN)
+ if (AARCH64_PAUTH_SSP_OR_RA_SIGN)
REG_NOTES (insn) = aarch64_attach_ra_auth_dwarf_note (cfi_ops, 0);
else
REG_NOTES (insn) = cfi_ops;
@@ -3528,7 +3537,7 @@ aarch64_expand_epilogue (bool for_sibcall)
authentication, as the following stack adjustment will update CFA to
handler's CFA while we want to use the CFA of the function which calls
__builtin_eh_return. */
- if (AARCH64_ENABLE_RETURN_ADDRESS_SIGN
+ if (AARCH64_PAUTH_SSP_OR_RA_SIGN
&& (for_sibcall || !TARGET_PAUTH || crtl->calls_eh_return))
{
insn = emit_insn (gen_auth_reg (gen_rtx_REG (Pmode, LR_REGNUM),
@@ -8737,6 +8746,14 @@ aarch64_override_options (void)
if (aarch64_ra_sign_scope != AARCH64_FUNCTION_NONE && TARGET_ILP32)
error ("Return address signing is only supported on LP64");
+ if (aarch64_stack_protector_dialect == STACK_PROTECTOR_PAUTH && TARGET_ILP32)
+ error ("Pointer authentication based -fstack-protector is only supported "
+ "on LP64");
+
+ if (aarch64_stack_protector_dialect == STACK_PROTECTOR_PAUTH && !TARGET_PAUTH)
+ error ("Pointer authentication based -fstack-protector is only supported "
+ "on an architecture with the pointer authentication extension");
+
/* Make sure we properly set up the explicit options. */
if ((aarch64_cpu_string && valid_cpu)
|| (aarch64_tune_string && valid_tune))
@@ -581,9 +581,7 @@
[(return)]
""
{
- if (AARCH64_ENABLE_RETURN_ADDRESS_SIGN
- && TARGET_PAUTH
- && !crtl->calls_eh_return)
+ if (AARCH64_PAUTH_SSP_OR_RA_SIGN && TARGET_PAUTH && !crtl->calls_eh_return)
{
if (aarch64_pauth_key == AARCH64_PAUTH_IKEY_A)
return "retaa";
@@ -198,3 +198,17 @@ Common Var(flag_mlow_precision_div) Optimization
Enable the division approximation. Enabling this reduces
precision of division results to about 16 bits for
single precision and to 32 bits for double precision.
+
+Enum
+Name(stack_protector_type) Type(enum aarch64_stack_protector_type)
+Known stack protector dialects (for use with the -mstack-protector-dialect= option):
+
+EnumValue
+Enum(stack_protector_type) String(trad) Value(STACK_PROTECTOR_TRAD)
+
+EnumValue
+Enum(stack_protector_type) String(pauth) Value(STACK_PROTECTOR_PAUTH)
+
+mstack-protector-dialect=
+Target RejectNegative Joined Enum(stack_protector_type) Var(aarch64_stack_protector_dialect) Init(STACK_PROTECTOR_TRAD) Save
+Specify stack protector dialect.
@@ -13357,6 +13357,12 @@ Select the key used for return address signing. Permissible values are
@samp{a_key} for A key and @samp{b_key} for B key. @samp{a_key} is the default
value.
+@item -mstack-protector-dialect=@var{dialect}
+@opindex mstack-protector-dialect
+Select the implementation dialect used by @option{-fstack-protector}.
+@samp{trad} selects the traditional GCC implementation; @samp{pauth} selects
+the pointer authentication accelerated implementation, which is only
+available for AArch64 LP64.
+
@end table
@subsubsection @option{-march} and @option{-mcpu} Feature Modifiers