[RFC,bpf-next,v2,2/4] tracing/kprobe: bpf: Compare instruction pointer with original one

Message ID: 151427444611.32561.15006958504436049655.stgit@devbox
State: Changes Requested, archived
Delegated to: BPF Maintainers
Series: Separate error injection table from kprobes

Commit Message

Masami Hiramatsu (Google) Dec. 26, 2017, 7:47 a.m. UTC
Compare the instruction pointer with the original one saved on the
stack instead of using the per-cpu bpf_kprobe_override flag.

This patch also consolidates the reset_current_kprobe() and
preempt_enable_no_resched() blocks, since they can now be done
in one place.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
---
 kernel/trace/bpf_trace.c    |    1 -
 kernel/trace/trace_kprobe.c |   21 +++++++--------------
 2 files changed, 7 insertions(+), 15 deletions(-)
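
The net effect on kprobe_perf_func() is easiest to read condensed.
A simplified sketch of the BPF branch after this patch (perf event
submission elided; identifiers as in the diff below):

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * A program that called bpf_override_return() rewrote
		 * regs->ip, so the saved copy no longer matches.  Clear
		 * the current kprobe and re-enable preemption here,
		 * because returning 1 skips the single-step path that
		 * would normally do both.
		 */
		if (orig_ip != instruction_pointer(regs)) {
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
		if (!ret)
			return 0;
	}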

Comments

Alexei Starovoitov Dec. 27, 2017, 2 a.m. UTC | #1
On Tue, Dec 26, 2017 at 04:47:26PM +0900, Masami Hiramatsu wrote:
> Compare the instruction pointer with the original one saved on the
> stack instead of using the per-cpu bpf_kprobe_override flag.
> 
> This patch also consolidates the reset_current_kprobe() and
> preempt_enable_no_resched() blocks, since they can now be done
> in one place.
> 
> Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
> ---
>  kernel/trace/bpf_trace.c    |    1 -
>  kernel/trace/trace_kprobe.c |   21 +++++++--------------
>  2 files changed, 7 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index d663660f8392..cefa9b0e396c 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -83,7 +83,6 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
>  #ifdef CONFIG_BPF_KPROBE_OVERRIDE
>  BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
>  {
> -	__this_cpu_write(bpf_kprobe_override, 1);
>  	regs_set_return_value(regs, rc);
>  	arch_ftrace_kprobe_override_function(regs);
>  	return 0;
> diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> index 265e3e27e8dc..a7c7035963f2 100644
> --- a/kernel/trace/trace_kprobe.c
> +++ b/kernel/trace/trace_kprobe.c
> @@ -42,8 +42,6 @@ struct trace_kprobe {
>  	(offsetof(struct trace_kprobe, tp.args) +	\
>  	(sizeof(struct probe_arg) * (n)))
>  
> -DEFINE_PER_CPU(int, bpf_kprobe_override);
> -
>  static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
>  {
>  	return tk->rp.handler != NULL;
> @@ -1204,6 +1202,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
>  	int rctx;
>  
>  	if (bpf_prog_array_valid(call)) {
> +		unsigned long orig_ip = instruction_pointer(regs);
>  		int ret;
>  
>  		ret = trace_call_bpf(call, regs);
> @@ -1211,12 +1210,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
>  		/*
>  		 * We need to check and see if we modified the pc of the
>  		 * pt_regs, and if so clear the kprobe and return 1 so that we
> -		 * don't do the instruction skipping.  Also reset our state so
> -		 * we are clean the next pass through.
> +		 * don't do the single stepping.
> +		 * The ftrace kprobe handler leaves it up to us to re-enable
> +		 * preemption here before returning if we've modified the ip.
>  		 */
> -		if (__this_cpu_read(bpf_kprobe_override)) {
> -			__this_cpu_write(bpf_kprobe_override, 0);
> +		if (orig_ip != instruction_pointer(regs)) {
>  			reset_current_kprobe();
> +			preempt_enable_no_resched();

This is a great idea.
Acked-by: Alexei Starovoitov <ast@kernel.org>
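
For reference, the reason the ip changes at all: bpf_override_return()
calls arch_ftrace_kprobe_override_function(), which on x86 at the time
pointed regs->ip at a stub that simply returns. Roughly (a sketch
reconstructed from the arch code of that era, not part of this series):

	asmlinkage void override_func(void);
	asm(
		".type override_func, @function\n"
		"override_func:\n"
		"	ret\n"
		".size override_func, .-override_func\n");

	void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
	{
		/*
		 * Divert execution to a bare "ret": the probed function
		 * body never runs, and the return value planted by
		 * regs_set_return_value() is what the caller sees.
		 */
		regs->ip = (unsigned long)&override_func;
	}
	NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);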

Patch

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d663660f8392..cefa9b0e396c 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
-	__this_cpu_write(bpf_kprobe_override, 1);
 	regs_set_return_value(regs, rc);
 	arch_ftrace_kprobe_override_function(regs);
 	return 0;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 265e3e27e8dc..a7c7035963f2 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -42,8 +42,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
-DEFINE_PER_CPU(int, bpf_kprobe_override);
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
@@ -1204,6 +1202,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int rctx;
 
 	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
 		int ret;
 
 		ret = trace_call_bpf(call, regs);
@@ -1211,12 +1210,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 		/*
 		 * We need to check and see if we modified the pc of the
 		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the instruction skipping.  Also reset our state so
-		 * we are clean the next pass through.
+		 * don't do the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
 		 */
-		if (__this_cpu_read(bpf_kprobe_override)) {
-			__this_cpu_write(bpf_kprobe_override, 0);
+		if (orig_ip != instruction_pointer(regs)) {
 			reset_current_kprobe();
+			preempt_enable_no_resched();
 			return 1;
 		}
 		if (!ret)
@@ -1324,15 +1324,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE) {
+	if (tk->tp.flags & TP_FLAG_PROFILE)
 		ret = kprobe_perf_func(tk, regs);
-		/*
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
-		 */
-		if (ret)
-			preempt_enable_no_resched();
-	}
 #endif
 	return ret;
 }
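
To exercise this path, a BPF program attached to a kprobe calls the
bpf_override_return() helper. A minimal sketch in the style of the
in-tree samples (the target function and error value are illustrative,
and the kernel must be built with CONFIG_BPF_KPROBE_OVERRIDE):

	#define KBUILD_MODNAME "override_sample"
	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("kprobe/open_ctree")
	int override_open_ctree(struct pt_regs *ctx)
	{
		/*
		 * Make the probed function return -ENOMEM without running
		 * its body.  This rewrites regs->ip, which kprobe_perf_func()
		 * now detects by comparing against the saved value.
		 */
		bpf_override_return(ctx, -12);
		return 0;
	}

	char _license[] SEC("license") = "GPL";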