[v2,7/7] powerpc/kprobes: Allow probing on any ftrace address

Message ID da4200e511f4863b153b0721f3913a75aa3bf80d.1561634177.git.naveen.n.rao@linux.vnet.ibm.com (mailing list archive)
State Changes Requested
Headers show
Series powerpc/ftrace: Patch out -mprofile-kernel instructions | expand

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch next (c7d64b560ce80d8c44f082eee8352f0778a73195)
snowpatch_ozlabs/build-ppc64le success Build succeeded
snowpatch_ozlabs/build-ppc64be success Build succeeded
snowpatch_ozlabs/build-ppc64e success Build succeeded
snowpatch_ozlabs/build-pmac32 success Build succeeded
snowpatch_ozlabs/checkpatch success total: 0 errors, 0 warnings, 0 checks, 64 lines checked

Commit Message

Naveen N. Rao June 27, 2019, 11:23 a.m. UTC
With KPROBES_ON_FTRACE, a kprobe can be inserted on the instruction
that branches to _mcount() (referred to as the ftrace location). With
-mprofile-kernel, we now include the preceding 'mflr r0' as part of
the ftrace location.

However, by default, probing on an instruction that is not the actual
branch to _mcount() is prohibited, since it is considered to not be at
an instruction boundary. This is not an issue on powerpc, so allow such
probes by overriding arch_check_ftrace_location().
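
For reference, the generic weak helper in kernel/kprobes.c is what rejects
such probes today: any probe address that falls within an ftrace location
but is not the recorded ftrace address fails the check. A rough sketch of
that generic code (simplified here; see kernel/kprobes.c for the exact
upstream version) looks like this:

/*
 * Simplified sketch of the generic __weak arch_check_ftrace_location().
 * It refuses any probe address inside an ftrace location that is not the
 * ftrace address itself, which is what disallows probing the 'mflr r0'.
 */
int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr = ftrace_location((unsigned long)p->addr);

	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}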

In addition, we update kprobe_ftrace_handler() to detect this scenario
and to pass the proper nip to the pre and post probe handlers.
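
To illustrate the end result: with -mprofile-kernel, the profiled
function's prologue has the 'mflr r0' immediately followed by the
'bl _mcount', and after this patch a kprobe can sit on either of the two.
A minimal sketch of a module probing the 'mflr r0' follows; the target
symbol and the offset used below are illustrative assumptions (the real
offset of the 'mflr r0' depends on the function's prologue), not something
defined by this patch:

#include <linux/kprobes.h>
#include <linux/module.h>

static int mflr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* With this patch, regs->nip points at the probed 'mflr r0'. */
	pr_info("pre-handler: nip=0x%lx\n", regs->nip);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* example target; any traced function */
	.offset      = 8,		/* assumed offset of the 'mflr r0'; varies per function */
	.pre_handler = mflr_pre_handler,
};

static int __init mflr_probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit mflr_probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(mflr_probe_init);
module_exit(mflr_probe_exit);
MODULE_LICENSE("GPL");

The same probe would previously have been rejected with -EILSEQ by the
generic check shown above.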

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/kprobes-ftrace.c | 32 +++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

Comments

Masami Hiramatsu (Google) June 27, 2019, 2:19 p.m. UTC | #1
On Thu, 27 Jun 2019 16:53:55 +0530
"Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com> wrote:

> With KPROBES_ON_FTRACE, a kprobe can be inserted on the instruction
> that branches to _mcount() (referred to as the ftrace location). With
> -mprofile-kernel, we now include the preceding 'mflr r0' as part of
> the ftrace location.
> 
> However, by default, probing on an instruction that is not the actual
> branch to _mcount() is prohibited, since it is considered to not be at
> an instruction boundary. This is not an issue on powerpc, so allow such
> probes by overriding arch_check_ftrace_location().
> 
> In addition, we update kprobe_ftrace_handler() to detect this scenario
> and to pass the proper nip to the pre and post probe handlers.
> 
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Looks good to me.

Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>

Thank you!

> ---
>  arch/powerpc/kernel/kprobes-ftrace.c | 32 +++++++++++++++++++++++++++-
>  1 file changed, 31 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
> index 972cb28174b2..23c840748183 100644
> --- a/arch/powerpc/kernel/kprobes-ftrace.c
> +++ b/arch/powerpc/kernel/kprobes-ftrace.c
> @@ -12,15 +12,35 @@
>  #include <linux/preempt.h>
>  #include <linux/ftrace.h>
>  
> +/*
> + * With -mprofile-kernel, we patch two instructions -- the branch to _mcount
> + * as well as the preceding 'mflr r0'. Both these instructions are claimed
> + * by ftrace and we should allow probing on either instruction.
> + */
> +int arch_check_ftrace_location(struct kprobe *p)
> +{
> +	if (ftrace_location((unsigned long)p->addr))
> +		p->flags |= KPROBE_FLAG_FTRACE;
> +	return 0;
> +}
> +
>  /* Ftrace callback handler for kprobes */
>  void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
>  			   struct ftrace_ops *ops, struct pt_regs *regs)
>  {
>  	struct kprobe *p;
> +	int mflr_kprobe = 0;
>  	struct kprobe_ctlblk *kcb;
>  
>  	p = get_kprobe((kprobe_opcode_t *)nip);
> -	if (unlikely(!p) || kprobe_disabled(p))
> +	if (!p) {
> +		p = get_kprobe((kprobe_opcode_t *)(nip - MCOUNT_INSN_SIZE));
> +		if (unlikely(!p))
> +			return;
> +		mflr_kprobe = 1;
> +	}
> +
> +	if (kprobe_disabled(p))
>  		return;
>  
>  	kcb = get_kprobe_ctlblk();
> @@ -33,6 +53,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
>  		 */
>  		regs->nip -= MCOUNT_INSN_SIZE;
>  
> +		if (mflr_kprobe)
> +			regs->nip -= MCOUNT_INSN_SIZE;
> +
>  		__this_cpu_write(current_kprobe, p);
>  		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
>  		if (!p->pre_handler || !p->pre_handler(p, regs)) {
> @@ -45,6 +68,8 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
>  				kcb->kprobe_status = KPROBE_HIT_SSDONE;
>  				p->post_handler(p, regs, 0);
>  			}
> +			if (mflr_kprobe)
> +				regs->nip += MCOUNT_INSN_SIZE;
>  		}
>  		/*
>  		 * If pre_handler returns !0, it changes regs->nip. We have to
> @@ -57,6 +82,11 @@ NOKPROBE_SYMBOL(kprobe_ftrace_handler);
>  
>  int arch_prepare_kprobe_ftrace(struct kprobe *p)
>  {
> +	if ((unsigned long)p->addr & 0x03) {
> +		pr_err("Attempt to register kprobe at an unaligned address\n");
> +		return -EILSEQ;
> +	}
> +
>  	p->ainsn.insn = NULL;
>  	p->ainsn.boostable = -1;
>  	return 0;
> -- 
> 2.22.0
>

Patch

diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 972cb28174b2..23c840748183 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -12,15 +12,35 @@ 
 #include <linux/preempt.h>
 #include <linux/ftrace.h>
 
+/*
+ * With -mprofile-kernel, we patch two instructions -- the branch to _mcount
+ * as well as the preceding 'mflr r0'. Both these instructions are claimed
+ * by ftrace and we should allow probing on either instruction.
+ */
+int arch_check_ftrace_location(struct kprobe *p)
+{
+	if (ftrace_location((unsigned long)p->addr))
+		p->flags |= KPROBE_FLAG_FTRACE;
+	return 0;
+}
+
 /* Ftrace callback handler for kprobes */
 void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 			   struct ftrace_ops *ops, struct pt_regs *regs)
 {
 	struct kprobe *p;
+	int mflr_kprobe = 0;
 	struct kprobe_ctlblk *kcb;
 
 	p = get_kprobe((kprobe_opcode_t *)nip);
-	if (unlikely(!p) || kprobe_disabled(p))
+	if (!p) {
+		p = get_kprobe((kprobe_opcode_t *)(nip - MCOUNT_INSN_SIZE));
+		if (unlikely(!p))
+			return;
+		mflr_kprobe = 1;
+	}
+
+	if (kprobe_disabled(p))
 		return;
 
 	kcb = get_kprobe_ctlblk();
@@ -33,6 +53,9 @@  void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 		 */
 		regs->nip -= MCOUNT_INSN_SIZE;
 
+		if (mflr_kprobe)
+			regs->nip -= MCOUNT_INSN_SIZE;
+
 		__this_cpu_write(current_kprobe, p);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		if (!p->pre_handler || !p->pre_handler(p, regs)) {
@@ -45,6 +68,8 @@  void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 				kcb->kprobe_status = KPROBE_HIT_SSDONE;
 				p->post_handler(p, regs, 0);
 			}
+			if (mflr_kprobe)
+				regs->nip += MCOUNT_INSN_SIZE;
 		}
 		/*
 		 * If pre_handler returns !0, it changes regs->nip. We have to
@@ -57,6 +82,11 @@  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
 int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
+	if ((unsigned long)p->addr & 0x03) {
+		pr_err("Attempt to register kprobe at an unaligned address\n");
+		return -EILSEQ;
+	}
+
 	p->ainsn.insn = NULL;
 	p->ainsn.boostable = -1;
 	return 0;