[08/12,i386] Modify ix86_compute_frame_layout for -mcall-ms2sysv-xlogues

Message ID 20170427080932.11703-8-daniel.santos@pobox.com
State New

Commit Message

Daniel Santos April 27, 2017, 8:09 a.m. UTC
ix86_compute_frame_layout will now populate the fields added to structs
machine_function and ix86_frame and adjust the frame layout to
facilitate the use of the save & restore stubs.  This is also where we
initialize stub_managed_regs to track which register saves & restores
are handled by the out-of-line stub and which are handled inline, since
a function can end up with a mix of both when inline asm explicitly
clobbers a register.
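
To illustrate how registers end up split between the stub and inline
saves, here is a standalone sketch.  It is not part of the patch; the
register order and the "only if ... and all previous regs are clobbered"
condition come from the stack-layout comment in the diff, everything else
is invented for illustration.  The idea: a register can only be handed to
the stub if every register ahead of it in the stub's fixed save order is
also being saved, so a clobber pattern that breaks the prefix (e.g. from
inline asm) leaves the later registers to be saved inline.

  /* Standalone sketch, not GCC code.  The optional GP registers in the
     order the stub saves them, after the always-saved XMM6-15, RSI and
     RDI.  A register is stub-managed only if every register before it
     in this order is stub-managed too.  */
  #include <stdbool.h>
  #include <stdio.h>

  static const char *const stub_order[] =
    { "RBX", "RBP", "R12", "R13", "R14", "R15" };
  enum { N_OPTIONAL = sizeof stub_order / sizeof *stub_order };

  /* Split the clobbered registers into stub-managed and inline-managed;
     return how many optional registers the stub will handle.  */
  static unsigned
  split_regs (const bool clobbered[N_OPTIONAL])
  {
    unsigned stub_count = 0;

    /* Take the longest clobbered prefix of stub_order.  */
    while (stub_count < N_OPTIONAL && clobbered[stub_count])
      stub_count++;

    for (unsigned i = 0; i < N_OPTIONAL; i++)
      if (clobbered[i])
        printf ("%s: saved %s\n", stub_order[i],
                i < stub_count ? "by the stub" : "inline");
    return stub_count;
  }

  int
  main (void)
  {
    /* RBX and R12 are clobbered but RBP is not (say, inline asm clobbered
       R12 directly), so R12 cannot join the stub and is saved inline.  */
    bool clobbered[N_OPTIONAL] = { true, false, true };
    printf ("stub manages %u optional register(s)\n", split_regs (clobbered));
    return 0;
  }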

Signed-off-by: Daniel Santos <daniel.santos@pobox.com>
---
 gcc/config/i386/i386.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 90 insertions(+), 4 deletions(-)
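
A note on the alignment tests in the ix86_compute_frame_layout hunk below
(my reading, sketched under the assumption that the running frame offset
is always a multiple of UNITS_PER_WORD, i.e. 8 bytes, at that point): with
that assumption, (offset & UNITS_PER_WORD) is nonzero exactly when the
offset is an odd multiple of 8, i.e. not 16-byte aligned.  That is the
condition behind call_ms2sysv_pad_in (tested on the offset before the stub
save area) and call_ms2sysv_pad_out (tested after adding the stub's stack
space), while !(offset & 0xf) asserts 16-byte alignment.

  /* Standalone sketch of the misalignment test, not GCC code.  */
  #include <stdio.h>

  #define UNITS_PER_WORD 8  /* x86-64 word size, as in GCC.  */

  int
  main (void)
  {
    /* Walk an 8-byte-granular offset and show the two predicates used in
       the patch: "needs one word of padding to reach 16-byte alignment"
       and "is 16-byte aligned".  */
    for (long offset = 0; offset <= 40; offset += UNITS_PER_WORD)
      printf ("offset %2ld: pad word needed = %d, 16-byte aligned = %d\n",
              offset, !!(offset & UNITS_PER_WORD), !(offset & 0xf));
    return 0;
  }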

Patch

diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 4f0cb7dd6cc..debfe457d97 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -2715,12 +2715,29 @@  struct GTY(()) stack_local_entry {
    saved frame pointer			if frame_pointer_needed
 					<- HARD_FRAME_POINTER
    [saved regs]
-					<- regs_save_offset
+					<- reg_save_offset
    [padding0]
 					<- stack_realign_offset
    [saved SSE regs]
+	OR
+   [stub-saved registers for ms x64 --> sysv clobbers
+			<- Start of out-of-line, stub-saved/restored regs
+			   (see libgcc/config/i386/(sav|res)ms64*.S)
+     [XMM6-15]
+     [RSI]
+     [RDI]
+     [?RBX]		only if RBX is clobbered
+     [?RBP]		only if RBP and RBX are clobbered
+     [?R12]		only if R12 and all previous regs are clobbered
+     [?R13]		only if R13 and all previous regs are clobbered
+     [?R14]		only if R14 and all previous regs are clobbered
+     [?R15]		only if R15 and all previous regs are clobbered
+			<- end of stub-saved/restored regs
+     [padding1]
+   ]
+					<- outlined_save_offset
 					<- sse_regs_save_offset
-   [padding1]          |
+   [padding2]
 		       |		<- FRAME_POINTER
    [va_arg registers]  |
 		       |
@@ -2745,6 +2762,7 @@  struct ix86_frame
   HOST_WIDE_INT reg_save_offset;
   HOST_WIDE_INT stack_realign_allocate_offset;
   HOST_WIDE_INT stack_realign_offset;
+  HOST_WIDE_INT outlined_save_offset;
   HOST_WIDE_INT sse_reg_save_offset;
 
   /* When save_regs_using_mov is set, emit prologue using
@@ -12802,6 +12820,15 @@  ix86_builtin_setjmp_frame_value (void)
   return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
 }
 
+/* Disables the out-of-lined msabi to sysv pro/epilogues and emits a warning
+   naming the unsupported FEATURE that forced them off.  */
+static void disable_call_ms2sysv_xlogues (const char *feature)
+{
+  cfun->machine->call_ms2sysv = false;
+  warning (OPT_mcall_ms2sysv_xlogues, "not currently compatible with %s",
+	   feature);
+}
+
 /* When using -fsplit-stack, the allocation routines set a field in
    the TCB to the bottom of the stack plus this much space, measured
    in bytes.  */
@@ -12820,9 +12847,50 @@  ix86_compute_frame_layout (struct ix86_frame *frame)
   HOST_WIDE_INT size = get_frame_size ();
   HOST_WIDE_INT to_allocate;
 
+  CLEAR_HARD_REG_SET (stub_managed_regs);
+
+  /* m->call_ms2sysv is initially enabled in ix86_expand_call for all 64-bit
+     ms_abi functions that call a sysv function.  We now need to prune away
+     cases where it should be disabled.  */
+  if (TARGET_64BIT && m->call_ms2sysv)
+  {
+    gcc_assert (TARGET_64BIT_MS_ABI);
+    gcc_assert (TARGET_CALL_MS2SYSV_XLOGUES);
+    gcc_assert (!TARGET_SEH);
+
+    if (!TARGET_SSE)
+      m->call_ms2sysv = false;
+
+    /* Don't break hot-patched functions.  */
+    else if (ix86_function_ms_hook_prologue (current_function_decl))
+      m->call_ms2sysv = false;
+
+    /* TODO: Cases not yet examined.  */
+    else if (crtl->calls_eh_return)
+      disable_call_ms2sysv_xlogues ("__builtin_eh_return");
+
+    else if (ix86_static_chain_on_stack)
+      disable_call_ms2sysv_xlogues ("static call chains");
+
+    else if (ix86_using_red_zone ())
+      disable_call_ms2sysv_xlogues ("red zones");
+
+    else if (flag_split_stack)
+      disable_call_ms2sysv_xlogues ("split stack");
+
+    /* Finally, compute which registers the stub will manage.  */
+    else
+      {
+	unsigned count = xlogue_layout
+			 ::compute_stub_managed_regs (stub_managed_regs);
+	m->call_ms2sysv_extra_regs = count - xlogue_layout::MIN_REGS;
+      }
+  }
+
   frame->nregs = ix86_nsaved_regs ();
   frame->nsseregs = ix86_nsaved_sseregs ();
-  CLEAR_HARD_REG_SET (stub_managed_regs);
+  m->call_ms2sysv_pad_in = 0;
+  m->call_ms2sysv_pad_out = 0;
 
   /* 64-bit MS ABI seem to require stack alignment to be always 16,
      except for function prologues, leaf functions and when the defult
@@ -12926,8 +12994,26 @@  ix86_compute_frame_layout (struct ix86_frame *frame)
     offset = ROUND_UP (offset, stack_alignment_needed);
   frame->stack_realign_offset = offset;
 
+  if (TARGET_64BIT && m->call_ms2sysv)
+    {
+      gcc_assert (stack_alignment_needed >= 16);
+      gcc_assert (!frame->nsseregs);
+
+      m->call_ms2sysv_pad_in = !!(offset & UNITS_PER_WORD);
+
+      /* Select an appropriate layout for incoming stack offset.  */
+      const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+
+      if ((offset + xlogue.get_stack_space_used ()) & UNITS_PER_WORD)
+	m->call_ms2sysv_pad_out = 1;
+
+      offset += xlogue.get_stack_space_used ();
+      gcc_assert (!(offset & 0xf));
+      frame->outlined_save_offset = offset;
+    }
+
   /* Align and set SSE register save area.  */
-  if (frame->nsseregs)
+  else if (frame->nsseregs)
     {
       /* The only ABI that has saved SSE registers (Win64) also has a
 	 16-byte aligned default stack.  However, many programs violate