@@ -6,7 +6,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
-#include <linux/ptrace.h>
#include <linux/debugfs.h>
#include <asm/tlbflush.h>
@@ -101,9 +100,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
{
unsigned cpu = smp_processor_id();
- /* Null tsk means switching to kernel, so that's safe */
- if (ibpb_inuse && tsk &&
- ___ptrace_may_access(tsk, current, PTRACE_MODE_IBPB))
+ if (ibpb_inuse && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
if (likely(prev != next)) {
@@ -57,17 +57,9 @@ extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-#define PTRACE_MODE_NOACCESS_CHK 0x20
-
-#define PTRACE_MODE_IBPB (PTRACE_MODE_ATTACH | PTRACE_MODE_NOAUDIT \
- | PTRACE_MODE_NOACCESS_CHK)
-
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
-extern int ___ptrace_may_access(struct task_struct *cur,
- struct task_struct *task, unsigned int mode);
-
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
@@ -222,10 +222,9 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
}
/* Returns 0 on success, -errno on denial. */
-int ___ptrace_may_access(struct task_struct *cur, struct task_struct *task,
- unsigned int mode)
+static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
- const struct cred *cred = __task_cred(cur), *tcred;
+ const struct cred *cred = current_cred(), *tcred;
struct mm_struct *mm;
/* May we inspect the given task?
@@ -238,7 +237,7 @@ int ___ptrace_may_access(struct task_struct *cur, struct task_struct *task,
*/
/* Don't let security modules deny introspection */
- if (same_thread_group(task, cur))
+ if (same_thread_group(task, current))
return 0;
rcu_read_lock();
tcred = __task_cred(task);
@@ -261,16 +260,7 @@ ok:
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
- if (!(mode & PTRACE_MODE_NOACCESS_CHK))
- return security_ptrace_access_check(task, mode);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(___ptrace_may_access);
-
-static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
-{
- return ___ptrace_may_access(current, task, mode);
+ return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
BugLink: https://bugs.launchpad.net/bugs/1759920 This reverts commit 51aaffc323e2db4878e07eefd2724c632a2010fc. Using a ptrace access check in the middle of a task switch was causing a hard lockup in some cases when the old task was confined by AppArmor. If the AppArmor policy for the old task didn't allow the task to ptrace the new task, AppArmor would attempt to emit an audit message and deadlock on the task's pi_lock would occur. The fix is to revert this change and go with upstream's implementation that uses the task's dumpable state to determine if IBPB should be used. Signed-off-by: Tyler Hicks <tyhicks@canonical.com> --- arch/x86/mm/tlb.c | 5 +---- include/linux/ptrace.h | 8 -------- kernel/ptrace.c | 18 ++++-------------- 3 files changed, 5 insertions(+), 26 deletions(-)
BugLink: https://bugs.launchpad.net/bugs/1759920 This reverts commit 51aaffc323e2db4878e07eefd2724c632a2010fc. Using a ptrace access check in the middle of a task switch was causing a hard lockup in some cases when the old task was confined by AppArmor. If the AppArmor policy for the the old task didn't allow the task to ptrace the new task, AppArmor would attempt to emit an audit message and deadlock on the task's pi_lock would occur. The fix is to revert this change and go with upstream's implementation that uses the task's dumpable state to determine if IBPB should be used. Signed-off-by: Tyler Hicks <tyhicks@canonical.com> --- arch/x86/mm/tlb.c | 5 +---- include/linux/ptrace.h | 8 -------- kernel/ptrace.c | 18 ++++-------------- 3 files changed, 5 insertions(+), 26 deletions(-)