From patchwork Tue May 8 20:40:06 2018
X-Patchwork-Submitter: "H.J. Lu" <hjl.tools@gmail.com>
X-Patchwork-Id: 910456
From: "H.J. Lu" <hjl.tools@gmail.com>
To: libc-alpha@sourceware.org
Cc: Carlos O'Donell
Subject: [PATCH 08/23] x86: Update vfork to pop shadow stack
Date: Tue, 8 May 2018 13:40:06 -0700
Message-Id: <20180508204021.31845-9-hjl.tools@gmail.com>
In-Reply-To: <20180508204021.31845-1-hjl.tools@gmail.com>
References: <20180508204021.31845-1-hjl.tools@gmail.com>

Since we can't change the return address on the shadow stack, when the
shadow stack is in use we need to pop the shadow stack and jump back to
the caller directly.

	* sysdeps/unix/sysv/linux/i386/vfork.S (SYSCALL_ERROR_HANDLER):
	Redefine if shadow stack is enabled.
	(SYSCALL_ERROR_LABEL): Likewise.
	(__vfork): Pop shadow stack and jump back to caller directly
	when shadow stack is in use.
	* sysdeps/unix/sysv/linux/x86_64/vfork.S (SYSCALL_ERROR_HANDLER):
	Redefine if shadow stack is enabled.
	(SYSCALL_ERROR_LABEL): Likewise.
	(__vfork): Pop shadow stack and jump back to caller directly
	when shadow stack is in use.
---
 sysdeps/unix/sysv/linux/i386/vfork.S   | 54 ++++++++++++++++++++++++++
 sysdeps/unix/sysv/linux/x86_64/vfork.S | 35 +++++++++++++++++
 2 files changed, 89 insertions(+)

diff --git a/sysdeps/unix/sysv/linux/i386/vfork.S b/sysdeps/unix/sysv/linux/i386/vfork.S
index 8f40d02d09..06d834d632 100644
--- a/sysdeps/unix/sysv/linux/i386/vfork.S
+++ b/sysdeps/unix/sysv/linux/i386/vfork.S
@@ -21,6 +21,35 @@
 #include <sysdep.h>
 #include <tcb-offsets.h>
 
+#if defined __CET__ && (__CET__ & 2) != 0
+/* When shadow stack is in use, we need to pop shadow stack and jump
+   back to caller directly.  */
+# undef SYSCALL_ERROR_HANDLER
+# ifdef PIC
+#  define SYSCALL_ERROR_HANDLER				\
+0:							\
+  calll .L1;						\
+.L1:							\
+  popl %edx;						\
+.L2:							\
+  addl $_GLOBAL_OFFSET_TABLE_ + (.L2 - .L1), %edx;	\
+  movl __libc_errno@gotntpoff(%edx), %edx;		\
+  negl %eax;						\
+  movl %eax, %gs:(%edx);				\
+  orl $-1, %eax;					\
+  jmp 1b;
+# else
+#  define SYSCALL_ERROR_HANDLER				\
+0:							\
+  movl __libc_errno@indntpoff, %edx;			\
+  negl %eax;						\
+  movl %eax, %gs:(%edx);				\
+  orl $-1, %eax;					\
+  jmp 1b;
+# endif
+# undef SYSCALL_ERROR_LABEL
+# define SYSCALL_ERROR_LABEL 0f
+#endif
 
 /* Clone the calling process, but without copying the whole address space.
    The calling process is suspended until the new process exits or is
@@ -38,16 +67,41 @@ ENTRY (__vfork)
 	movl	$SYS_ify (vfork), %eax
 	int	$0x80
 
+#if !defined __CET__ || (__CET__ & 2) == 0
 	/* Jump to the return PC.  Don't jump directly since this
 	   disturbs the branch target cache.  Instead push the return
 	   address back on the stack.  */
 	pushl	%ecx
 	cfi_adjust_cfa_offset (4)
+#endif
 
 	cmpl	$-4095, %eax
 	/* Branch forward if it failed.  */
 	jae	SYSCALL_ERROR_LABEL
 
+#if defined __CET__ && (__CET__ & 2) != 0
+1:
+	/* Check if shadow stack is in use.  */
+	xorl	%edx, %edx
+	rdsspd	%edx
+	testl	%edx, %edx
+	/* Normal return if shadow stack isn't in use.  */
+	je	L(no_shstk)
+
+	/* Pop return address from shadow stack and jump back to caller
+	   directly.  */
+	movl	$1, %edx
+	incsspd	%edx
+	jmp	*%ecx
+
+L(no_shstk):
+	/* Jump to the return PC.  Don't jump directly since this
+	   disturbs the branch target cache.  Instead push the return
+	   address back on the stack.  */
+	pushl	%ecx
+	cfi_adjust_cfa_offset (4)
+#endif
+
 	ret
 PSEUDO_END (__vfork)
 
diff --git a/sysdeps/unix/sysv/linux/x86_64/vfork.S b/sysdeps/unix/sysv/linux/x86_64/vfork.S
index e4c8269e3d..cab3ad34a0 100644
--- a/sysdeps/unix/sysv/linux/x86_64/vfork.S
+++ b/sysdeps/unix/sysv/linux/x86_64/vfork.S
@@ -20,6 +20,18 @@
 #include <sysdep.h>
 #include <tcb-offsets.h>
 
+#if defined __CET__ && (__CET__ & 2) != 0
+/* When shadow stack is in use, we need to pop shadow stack and jump
+   back to caller directly.  */
+# undef SYSCALL_ERROR_HANDLER
+# define SYSCALL_ERROR_HANDLER		\
+0:					\
+  SYSCALL_SET_ERRNO;			\
+  or $-1, %RAX_LP;			\
+  jmp 1b;
+# undef SYSCALL_ERROR_LABEL
+# define SYSCALL_ERROR_LABEL 0f
+#endif
 
 /* Clone the calling process, but without copying the whole address space.
    The calling process is suspended until the new process exits or is
@@ -38,13 +50,36 @@ ENTRY (__vfork)
 	movl	$SYS_ify (vfork), %eax
 	syscall
 
+#if !defined __CET__ || (__CET__ & 2) == 0
 	/* Push back the return PC.  */
 	pushq	%rdi
 	cfi_adjust_cfa_offset(8)
+#endif
 
 	cmpl	$-4095, %eax
 	jae SYSCALL_ERROR_LABEL		/* Branch forward if it failed.  */
 
+#if defined __CET__ && (__CET__ & 2) != 0
+1:
+	/* Check if shadow stack is in use.  */
+	xorl	%esi, %esi
+	rdsspq	%rsi
+	testq	%rsi, %rsi
+	/* Normal return if shadow stack isn't in use.  */
+	je	L(no_shstk)
+
+	/* Pop return address from shadow stack and jump back to caller
+	   directly.  */
+	movl	$1, %esi
+	incsspq	%rsi
+	jmp	*%rdi
+
+L(no_shstk):
+	/* Push back the return PC.  */
+	pushq	%rdi
+	cfi_adjust_cfa_offset(8)
+#endif
+
 	/* Normal return.  */
 	ret
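
A note on the preprocessor guards used throughout the patch: GCC defines
__CET__ when the tree is built with -fcf-protection, setting bit 0 for
indirect-branch tracking and bit 1 for shadow stack, so
"defined __CET__ && (__CET__ & 2) != 0" selects the shadow-stack build.
A minimal, hypothetical C sketch of the same compile-time check (not part
of the patch, just an illustration of the guard):

	/* Illustrative only -- mirrors the "#if defined __CET__ && (__CET__ & 2) != 0"
	   guard above.  Assumes GCC with -fcf-protection, which defines __CET__
	   to 1 (branch tracking), 2 (shadow stack) or 3 (both).  */
	#include <stdio.h>

	int
	main (void)
	{
	#if defined __CET__ && (__CET__ & 2) != 0
	  puts ("built with shadow-stack protection: __vfork takes the CET path");
	#else
	  puts ("built without shadow-stack protection: __vfork takes the plain path");
	#endif
	  return 0;
	}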
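The xor/rdssp/test sequence in both new code paths works because RDSSP is
a no-op when shadow stacks are not enabled, so the zeroed register stays
zero.  For readers who want to poke at this from user space, here is a
small, hypothetical C sketch; it assumes GCC 8 or later with -mshstk so
that <immintrin.h> exposes the CET intrinsics _get_ssp and _inc_ssp, and
it is not part of the patch:

	/* Illustrative sketch only.  Build with: gcc -mshstk shstk-check.c  */
	#include <stdio.h>
	#include <immintrin.h>

	int
	main (void)
	{
	  /* _get_ssp () issues RDSSP on a zeroed register; RDSSP leaves the
	     register unchanged when shadow stacks are disabled, so the result
	     is 0 -- the same check __vfork performs above.  */
	  unsigned long long ssp = _get_ssp ();

	  if (ssp == 0)
	    puts ("shadow stack not in use: __vfork takes the normal ret path");
	  else
	    /* With a live shadow stack, __vfork instead pops its own entry
	       (INCSSP, the instruction behind _inc_ssp) and jumps through the
	       return address it saved in a register at entry.  */
	    printf ("shadow stack in use, SSP = %#llx\n", ssp);

	  return 0;
	}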
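SYSCALL_ERROR_HANDLER is redefined here because, on the shadow-stack
build, the return PC has not been pushed back on the stack when the error
branch is taken; the redefined handler therefore sets errno and jumps
back to label 1b, which pops the shadow stack and jumps through the
return address still held in %ecx/%rdi.  What the handler computes is the
usual Linux convention that a raw syscall result in [-4095, -1] is a
negated errno; a rough C rendering of that convention (the function name
is hypothetical, not glibc code):

	#include <errno.h>

	/* Hypothetical illustration of the error path, not glibc code.  */
	long
	sketch_vfork_result (long raw)
	{
	  /* "cmpl $-4095, %eax; jae SYSCALL_ERROR_LABEL" is this unsigned test.  */
	  if ((unsigned long) raw >= (unsigned long) -4095L)
	    {
	      errno = (int) -raw;   /* negl %eax; movl %eax, %gs:(%edx) */
	      return -1;            /* orl $-1, %eax */
	    }
	  return raw;               /* child PID in the parent, 0 in the child */
	}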