[1/3] powerpc: kprobes: add support for KPROBES_ON_FTRACE

Message ID: 1cc8a5580ae3a642fea137b7fe9c526402d4af33.1487098440.git.naveen.n.rao@linux.vnet.ibm.com
State: Superseded

Commit Message

Naveen N. Rao Feb. 14, 2017, 6:58 p.m. UTC
Allow kprobes to be placed on ftrace _mcount() call sites. This
optimization avoids the use of a trap by riding on the ftrace
infrastructure.

This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS, which in turn depends on
MPROFILE_KERNEL, currently enabled only on powerpc64le with newer
toolchains.

Based on the x86 code by Masami.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/Kconfig                 |   1 +
 arch/powerpc/include/asm/kprobes.h   |  10 ++++
 arch/powerpc/kernel/Makefile         |   3 ++
 arch/powerpc/kernel/kprobes-ftrace.c | 100 +++++++++++++++++++++++++++++++++++
 arch/powerpc/kernel/kprobes.c        |   4 +-
 arch/powerpc/kernel/optprobes.c      |   3 ++
 6 files changed, 120 insertions(+), 1 deletion(-)
 create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c
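
To see what this buys from the probe user's side, a minimal kprobe module
along the lines of samples/kprobes/kprobe_example.c suffices; the probed
symbol below is illustrative:

/*
 * Minimal kprobe module, modelled on samples/kprobes/kprobe_example.c
 * (trimmed). The probed symbol is illustrative.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Handlers expect regs->nip to be the address of the probed insn */
	pr_info("kprobe hit at %s, nip = 0x%lx\n", p->symbol_name, regs->nip);
	return 0;	/* continue execution */
}

static struct kprobe kp = {
	.symbol_name	= "_do_fork",	/* illustrative target */
	.pre_handler	= handler_pre,
};

static int __init kprobe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kprobe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_init);
module_exit(kprobe_exit);
MODULE_LICENSE("GPL");

With this series, a probe whose address coincides with an ftrace call site
is handled through kprobe_ftrace_handler() in the patch below rather than a
trap, and is flagged [FTRACE] in /sys/kernel/debug/kprobes/list. Note that
with -mprofile-kernel the _mcount call sits a couple of instructions into
the function, so the probe address has to match that site for the ftrace
path to be taken.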

Comments

Ananth N Mavinakayanahalli Feb. 15, 2017, 4:28 a.m. UTC | #1
On Wed, Feb 15, 2017 at 12:28:34AM +0530, Naveen N. Rao wrote:
> Allow kprobes to be placed on ftrace _mcount() call sites. This
> optimization avoids the use of a trap by riding on the ftrace
> infrastructure.
> 
> This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS, which in turn depends on
> MPROFILE_KERNEL, currently enabled only on powerpc64le with newer
> toolchains.
> 
> Based on the x86 code by Masami.
> 
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> ---
>  arch/powerpc/Kconfig                 |   1 +
>  arch/powerpc/include/asm/kprobes.h   |  10 ++++
>  arch/powerpc/kernel/Makefile         |   3 ++
>  arch/powerpc/kernel/kprobes-ftrace.c | 100 +++++++++++++++++++++++++++++++++++
>  arch/powerpc/kernel/kprobes.c        |   4 +-
>  arch/powerpc/kernel/optprobes.c      |   3 ++
>  6 files changed, 120 insertions(+), 1 deletion(-)
>  create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c

You'll also need to update
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt

> +/* Ftrace callback handler for kprobes */
> +void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
> +			   struct ftrace_ops *ops, struct pt_regs *regs)
> +{
> +	struct kprobe *p;
> +	struct kprobe_ctlblk *kcb;
> +	unsigned long flags;
> +
> +	/* Disable irq for emulating a breakpoint and avoiding preempt */
> +	local_irq_save(flags);
> +	hard_irq_disable();
> +
> +	p = get_kprobe((kprobe_opcode_t *)nip);
> +	if (unlikely(!p) || kprobe_disabled(p))
> +		goto end;
> +
> +	kcb = get_kprobe_ctlblk();
> +	if (kprobe_running()) {
> +		kprobes_inc_nmissed_count(p);
> +	} else {
> +		unsigned long orig_nip = regs->nip;
> +		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */

Can you clarify this? On powerpc, the regs->nip at the time of
breakpoint hit points to the probed instruction, not the one after.

Ananth
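
For reference, the update Ananth points to amounts to flipping the powerpc
entry in that arch-support table, roughly as follows (exact column widths
as in the file):

-    |     powerpc: | TODO |
+    |     powerpc: |  ok  |
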
Naveen N. Rao Feb. 16, 2017, 7:48 p.m. UTC | #2
On 2017/02/15 09:58AM, Ananth N Mavinakayanahalli wrote:
> On Wed, Feb 15, 2017 at 12:28:34AM +0530, Naveen N. Rao wrote:
> > Allow kprobes to be placed on ftrace _mcount() call sites. This
> > optimization avoids the use of a trap by riding on the ftrace
> > infrastructure.
> > 
> > This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS, which in turn depends on
> > MPROFILE_KERNEL, currently enabled only on powerpc64le with newer
> > toolchains.
> > 
> > Based on the x86 code by Masami.
> > 
> > Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> > ---
> >  arch/powerpc/Kconfig                 |   1 +
> >  arch/powerpc/include/asm/kprobes.h   |  10 ++++
> >  arch/powerpc/kernel/Makefile         |   3 ++
> >  arch/powerpc/kernel/kprobes-ftrace.c | 100 +++++++++++++++++++++++++++++++++++
> >  arch/powerpc/kernel/kprobes.c        |   4 +-
> >  arch/powerpc/kernel/optprobes.c      |   3 ++
> >  6 files changed, 120 insertions(+), 1 deletion(-)
> >  create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c
> 
> You'll also need to update
> Documentation/features/debug/kprobes-on-ftrace/arch-support.txt

Sure.

> 
> > +/* Ftrace callback handler for kprobes */
> > +void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
> > +			   struct ftrace_ops *ops, struct pt_regs *regs)
> > +{
> > +	struct kprobe *p;
> > +	struct kprobe_ctlblk *kcb;
> > +	unsigned long flags;
> > +
> > +	/* Disable irq for emulating a breakpoint and avoiding preempt */
> > +	local_irq_save(flags);
> > +	hard_irq_disable();
> > +
> > +	p = get_kprobe((kprobe_opcode_t *)nip);
> > +	if (unlikely(!p) || kprobe_disabled(p))
> > +		goto end;
> > +
> > +	kcb = get_kprobe_ctlblk();
> > +	if (kprobe_running()) {
> > +		kprobes_inc_nmissed_count(p);
> > +	} else {
> > +		unsigned long orig_nip = regs->nip;
> > +		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */
> 
> Can you clarify this? On powerpc, the regs->nip at the time of
> breakpoint hit points to the probed instruction, not the one after.

Ah -- great catch! As it turns out, we actually need to set this back by 
an instruction due to the way ftrace works. I'll make the change.


Thanks,
Naveen
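
Since what ftrace hands kprobe_ftrace_handler() in regs->nip on powerpc is
one instruction past the _mcount location, the change Naveen describes
would look roughly like this against the hunk below (a sketch, not the
posted patch):

 	} else {
 		unsigned long orig_nip = regs->nip;
-		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */
-		regs->nip = nip + sizeof(kprobe_opcode_t);
+		/*
+		 * Move regs->nip back one instruction so the pre handler
+		 * sees the probe address itself, as it would on a trap.
+		 */
+		regs->nip -= MCOUNT_INSN_SIZE;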

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 260dd6a371e0..78419919556d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -101,6 +101,7 @@ config PPC
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
 	select HAVE_KPROBES
 	select HAVE_OPTPROBES if PPC64
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index e7ada061aa12..3305a12286fa 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -153,6 +153,16 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+			   struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+				  struct kprobe_ctlblk *kcb)
+{
+	return 0;
+}
+#endif
 #else
 static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
 static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a048b37b9b27..88b21427ccc7 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_OPTPROBES)		+= optprobes.o optprobes_head.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)	+= kprobes-ftrace.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
@@ -154,6 +155,8 @@ GCOV_PROFILE_machine_kexec_32.o := n
 UBSAN_SANITIZE_machine_kexec_32.o := n
 GCOV_PROFILE_kprobes.o := n
 UBSAN_SANITIZE_kprobes.o := n
+GCOV_PROFILE_kprobes-ftrace.o := n
+UBSAN_SANITIZE_kprobes-ftrace.o := n
 UBSAN_SANITIZE_vdso.o := n
 
 extra-$(CONFIG_PPC_FPU)		+= fpu.o
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
new file mode 100644
index 000000000000..0377b3013723
--- /dev/null
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -0,0 +1,100 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ *		  IBM Corporation
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+static nokprobe_inline
+int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
+{
+	/*
+	 * Emulate singlestep (and also recover regs->nip)
+	 * as if there is a nop
+	 */
+	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+	if (unlikely(p->post_handler)) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		p->post_handler(p, regs, 0);
+	}
+	__this_cpu_write(current_kprobe, NULL);
+	if (orig_nip)
+		regs->nip = orig_nip;
+	return 1;
+}
+
+int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		    struct kprobe_ctlblk *kcb)
+{
+	if (kprobe_ftrace(p))
+		return __skip_singlestep(p, regs, kcb, 0);
+	else
+		return 0;
+}
+NOKPROBE_SYMBOL(skip_singlestep);
+
+/* Ftrace callback handler for kprobes */
+void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long flags;
+
+	/* Disable irq for emulating a breakpoint and avoiding preempt */
+	local_irq_save(flags);
+	hard_irq_disable();
+
+	p = get_kprobe((kprobe_opcode_t *)nip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto end;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_nip = regs->nip;
+		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */
+		regs->nip = nip + sizeof(kprobe_opcode_t);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs))
+			__skip_singlestep(p, regs, kcb, orig_nip);
+		/*
+		 * If pre_handler returns !0, it sets regs->nip and
+		 * resets current kprobe.
+		 */
+	}
+end:
+	local_irq_restore(flags);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	p->ainsn.boostable = -1;
+	return 0;
+}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c213637b9d25..dab5e54f949c 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -233,7 +233,9 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			}
 			p = __this_cpu_read(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
-				goto ss_probe;
+				if (!skip_singlestep(p, regs, kcb))
+					goto ss_probe;
+				ret = 1;
 			}
 		}
 		goto no_kprobe;
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index e51a045f3d3b..a8f414a0b141 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -70,6 +70,9 @@ static unsigned long can_optimize(struct kprobe *p)
 	struct instruction_op op;
 	unsigned long nip = 0;
 
+	if (unlikely(kprobe_ftrace(p)))
+		return 0;
+
 	/*
 	 * kprobe placed for kretprobe during boot time
 	 * has a 'nop' instruction, which can be emulated.