
[v3,bpf-next,1/3] bpf: Add bpf_perf_prog_read_branches() helper

Message ID: 20200123212312.3963-2-dxu@dxuuu.xyz
State: Changes Requested
Delegated to: BPF Maintainers
Series: Add bpf_perf_prog_read_branches() helper

Commit Message

Daniel Xu Jan. 23, 2020, 9:23 p.m. UTC
Branch records are a CPU feature that can be configured to record
certain branches that are taken during code execution. This data is
particularly interesting for profile guided optimizations. perf has had
branch record support for a while but the data collection can be a bit
coarse grained.

We (Facebook) have seen in experiments that associating metadata with
branch records can improve results (after postprocessing). We generally
use bpf_probe_read_*() to get metadata out of userspace. That's why bpf
support for branch records is useful.

Aside from this particular use case, having branch data available to bpf
progs can be useful to get stack traces out of userspace applications
that omit frame pointers.

Signed-off-by: Daniel Xu <dxu@dxuuu.xyz>
---
 include/uapi/linux/bpf.h | 15 ++++++++++++++-
 kernel/trace/bpf_trace.c | 31 +++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 1 deletion(-)
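
For reference, a perf_event program would consume the helper roughly as
follows (a sketch, not part of the series: the section, program, and
buffer names are made up, and the helper declaration is assumed to be
regenerated from the updated UAPI header):

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

#define MAX_BR_ENTRIES 32

/* Global buffer: 32 entries would not fit on the 512-byte BPF stack. */
struct perf_branch_entry entries[MAX_BR_ENTRIES];

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	int written;

	/* Bytes written on success, negative on error (e.g. no branch
	 * stack on this event); unused bytes in entries are zeroed.
	 */
	written = bpf_perf_prog_read_branches(ctx, entries, sizeof(entries));
	if (written < 0)
		return 0;

	/* written / sizeof(struct perf_branch_entry) records are valid. */
	return 0;
}

char _license[] SEC("license") = "GPL";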

Comments

Yonghong Song Jan. 23, 2020, 11:48 p.m. UTC | #1
On 1/23/20 1:23 PM, Daniel Xu wrote:
> Branch records are a CPU feature that can be configured to record
> certain branches that are taken during code execution. This data is
> particularly interesting for profile guided optimizations. perf has had
> branch record support for a while but the data collection can be a bit
> coarse grained.
> 
> We (Facebook) have seen in experiments that associating metadata with
> branch records can improve results (after postprocessing). We generally
> use bpf_probe_read_*() to get metadata out of userspace. That's why bpf
> support for branch records is useful.
> 
> Aside from this particular use case, having branch data available to bpf
> progs can be useful to get stack traces out of userspace applications
> that omit frame pointers.
> 
> Signed-off-by: Daniel Xu <dxu@dxuuu.xyz>
> ---
>   include/uapi/linux/bpf.h | 15 ++++++++++++++-
>   kernel/trace/bpf_trace.c | 31 +++++++++++++++++++++++++++++++
>   2 files changed, 45 insertions(+), 1 deletion(-)
> 
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index f1d74a2bd234..50c580c8a201 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -2892,6 +2892,18 @@ union bpf_attr {
>    *		Obtain the 64bit jiffies
>    *	Return
>    *		The 64 bit jiffies
> + *
> + * int bpf_perf_prog_read_branches(struct bpf_perf_event_data *ctx, void *buf, u32 buf_size)
> + *	Description
> + *		For en eBPF program attached to a perf event, retrieve the

en => an

> + *		branch records (struct perf_branch_entry) associated to *ctx*
> + *		and store it in	the buffer pointed by *buf* up to size
> + *		*buf_size* bytes.
> + *
> + *		Any unused parts of *buf* will be filled with zeros.
> + *	Return
> + *		On success, number of bytes written to *buf*. On error, a
> + *		negative value.
>    */
>   #define __BPF_FUNC_MAPPER(FN)		\
>   	FN(unspec),			\
> @@ -3012,7 +3024,8 @@ union bpf_attr {
>   	FN(probe_read_kernel_str),	\
>   	FN(tcp_send_ack),		\
>   	FN(send_signal_thread),		\
> -	FN(jiffies64),
> +	FN(jiffies64),			\
> +	FN(perf_prog_read_branches),
>   
>   /* integer value in 'imm' field of BPF_CALL instruction selects which helper
>    * function eBPF program intends to call
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 19e793aa441a..24c51272a1f7 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1028,6 +1028,35 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
>            .arg3_type      = ARG_CONST_SIZE,
>   };
>   
> +BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
> +	   void *, buf, u32, size)
> +{
> +	struct perf_branch_stack *br_stack = ctx->data->br_stack;
> +	u32 to_copy = 0, to_clear = size;
> +	int err = -EINVAL;
> +
> +	if (unlikely(!br_stack))
> +		goto clear;
> +
> +	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
> +	to_clear -= to_copy;
> +
> +	memcpy(buf, br_stack->entries, to_copy);
> +	err = to_copy;
> +clear:
> +	memset(buf + to_copy, 0, to_clear);
> +	return err;

If size < br_stack->nr * sizeof(struct perf_branch_entry), the user has
no way to know whether some entries were not copied, except by
repeatedly trying larger buffers until the return value is smaller than
the input buffer size.

I think returning the expected buffer size to users would be a good
thing. We may not have malloc in bpf today, but a future malloc
facility would help in this case.

In user space, a user with a fixed buffer can issue repeated `read`
calls to read all values.

With bpf_probe_read(), repeated reads with an adjusted source pointer
can likewise read everything.

One possible design is to add a flag argument to the function, e.g., if
flag == GET_BR_STACK_NR, return br_stack->nr in buf/size;
if flag == GET_BR_STACK, return br_stack->entries in buf/size.

What do you think?
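
In program code that could look roughly like this (purely a sketch of
the proposal; the flag names, the extra argument, and the `entries`
buffer are hypothetical):

	__u64 nr = 0;

	/* First call: learn how many records exist. */
	bpf_perf_prog_read_branches(ctx, &nr, sizeof(nr), GET_BR_STACK_NR);

	/* Second call: copy the records themselves. */
	bpf_perf_prog_read_branches(ctx, entries, sizeof(entries),
				    GET_BR_STACK);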


> +}
> +
> +static const struct bpf_func_proto bpf_perf_prog_read_branches_proto = {
> +         .func           = bpf_perf_prog_read_branches,
> +         .gpl_only       = true,
> +         .ret_type       = RET_INTEGER,
> +         .arg1_type      = ARG_PTR_TO_CTX,
> +         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
> +         .arg3_type      = ARG_CONST_SIZE,
> +};
> +
>   static const struct bpf_func_proto *
>   pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>   {
> @@ -1040,6 +1069,8 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>   		return &bpf_get_stack_proto_tp;
>   	case BPF_FUNC_perf_prog_read_value:
>   		return &bpf_perf_prog_read_value_proto;
> +	case BPF_FUNC_perf_prog_read_branches:
> +		return &bpf_perf_prog_read_branches_proto;
>   	default:
>   		return tracing_func_proto(func_id, prog);
>   	}
>
John Fastabend Jan. 24, 2020, 12:49 a.m. UTC | #2
Daniel Xu wrote:
> Branch records are a CPU feature that can be configured to record
> certain branches that are taken during code execution. This data is
> particularly interesting for profile guided optimizations. perf has had
> branch record support for a while but the data collection can be a bit
> coarse grained.
> 
> We (Facebook) have seen in experiments that associating metadata with
> branch records can improve results (after postprocessing). We generally
> use bpf_probe_read_*() to get metadata out of userspace. That's why bpf
> support for branch records is useful.
> 
> Aside from this particular use case, having branch data available to bpf
> progs can be useful to get stack traces out of userspace applications
> that omit frame pointers.
> 
> Signed-off-by: Daniel Xu <dxu@dxuuu.xyz>
> ---
>  include/uapi/linux/bpf.h | 15 ++++++++++++++-
>  kernel/trace/bpf_trace.c | 31 +++++++++++++++++++++++++++++++
>  2 files changed, 45 insertions(+), 1 deletion(-)
> 

[...]

>   * function eBPF program intends to call
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 19e793aa441a..24c51272a1f7 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1028,6 +1028,35 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
>           .arg3_type      = ARG_CONST_SIZE,
>  };
>  
> +BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
> +	   void *, buf, u32, size)
> +{
> +	struct perf_branch_stack *br_stack = ctx->data->br_stack;
> +	u32 to_copy = 0, to_clear = size;
> +	int err = -EINVAL;
> +
> +	if (unlikely(!br_stack))
> +		goto clear;
> +
> +	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
> +	to_clear -= to_copy;
> +
> +	memcpy(buf, br_stack->entries, to_copy);
> +	err = to_copy;
> +clear:

There appears to be agreement to clear the extra buffer on error, but what
about in the non-error case? I expect one usage pattern is to submit a fairly
large buffer, large enough to handle the worst-case nr; in that case we end up
zeroing memory even in the successful case. Can we skip the clear there? Maybe
it's not too important either way, but it seems unnecessary.

> +	memset(buf + to_copy, 0, to_clear);
> +	return err;
> +}
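
For concreteness, the suggested variant would look roughly like this (a
sketch only; as noted further down in the thread, it is in tension with
ARG_PTR_TO_UNINIT_MEM requiring the helper to fill the whole buffer):

	if (unlikely(!br_stack)) {
		/* Clear only when we cannot provide any data. */
		memset(buf, 0, size);
		return -EINVAL;
	}

	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry),
			size);
	memcpy(buf, br_stack->entries, to_copy);
	/* Success: leave the tail of *buf untouched. */
	return to_copy;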
Daniel Xu Jan. 24, 2020, 2:02 a.m. UTC | #3
On Thu Jan 23, 2020 at 4:49 PM, John Fastabend wrote:
[...]
> >   * function eBPF program intends to call
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index 19e793aa441a..24c51272a1f7 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -1028,6 +1028,35 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
> >           .arg3_type      = ARG_CONST_SIZE,
> >  };
> >  
> > +BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
> > +	   void *, buf, u32, size)
> > +{
> > +	struct perf_branch_stack *br_stack = ctx->data->br_stack;
> > +	u32 to_copy = 0, to_clear = size;
> > +	int err = -EINVAL;
> > +
> > +	if (unlikely(!br_stack))
> > +		goto clear;
> > +
> > +	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
> > +	to_clear -= to_copy;
> > +
> > +	memcpy(buf, br_stack->entries, to_copy);
> > +	err = to_copy;
> > +clear:
>
> 
> There appears to be agreement to clear the extra buffer on error, but what
> about in the non-error case? I expect one usage pattern is to submit a
> fairly large buffer, large enough to handle the worst-case nr; in that case
> we end up zeroing memory even in the successful case. Can we skip the clear
> there? Maybe it's not too important either way, but it seems unnecessary.
>
> 
> > +	memset(buf + to_copy, 0, to_clear);
> > +	return err;
> > +}
>

Given Yonghong's suggestion of a flag argument, we need to allow users
to pass in a null ptr when querying the buffer size. So I'll change the `buf`
argument to ARG_PTR_TO_MEM_OR_NULL, which requires the buffer to be
initialized. We can then skip zeroing out altogether.

Although I think the end result is the same -- now the user has to zero it
out. Unfortunately, ARG_PTR_TO_UNINITIALIZED_MEM_OR_NULL is not
implemented yet.
Martin KaFai Lau Jan. 24, 2020, 8:25 a.m. UTC | #4
On Thu, Jan 23, 2020 at 06:02:58PM -0800, Daniel Xu wrote:
> On Thu Jan 23, 2020 at 4:49 PM, John Fastabend wrote:
> [...]
> > >   * function eBPF program intends to call
> > > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > > index 19e793aa441a..24c51272a1f7 100644
> > > --- a/kernel/trace/bpf_trace.c
> > > +++ b/kernel/trace/bpf_trace.c
> > > @@ -1028,6 +1028,35 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
> > >           .arg3_type      = ARG_CONST_SIZE,
> > >  };
> > >  
> > > +BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
> > > +	   void *, buf, u32, size)
> > > +{
> > > +	struct perf_branch_stack *br_stack = ctx->data->br_stack;
> > > +	u32 to_copy = 0, to_clear = size;
> > > +	int err = -EINVAL;
> > > +
> > > +	if (unlikely(!br_stack))
> > > +		goto clear;
> > > +
> > > +	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
> > > +	to_clear -= to_copy;
> > > +
> > > +	memcpy(buf, br_stack->entries, to_copy);
> > > +	err = to_copy;
> > > +clear:
> >
> > 
> > There appears to be agreement to clear the extra buffer on error, but what
> > about in the non-error case? I expect one usage pattern is to submit a
> > fairly large buffer, large enough to handle the worst-case nr; in that case
> > we end up zeroing memory even in the successful case. Can we skip the clear
> > there? Maybe it's not too important either way, but it seems unnecessary.
After some thought, I also think clearing in the non-error case
is not ideal. Daniel, is it the common use case to always have a
buf size large enough to capture the data of interest?

> >
> > 
> > > +	memset(buf + to_copy, 0, to_clear);
> > > +	return err;
> > > +}
> >
> 
> Given Yonghong's suggestion of a flag argument, we need to allow users
> to pass in a null ptr when querying the buffer size. So I'll change the `buf`
> argument to ARG_PTR_TO_MEM_OR_NULL, which requires the buffer to be
> initialized. We can then skip zeroing out altogether.
> 
> Although I think the end result is the same -- now the user has to zero it
> out. Unfortunately, ARG_PTR_TO_UNINITIALIZED_MEM_OR_NULL is not
> implemented yet.
A "flags" arg can be added but not used to keep our option open in the
future.  Not sure it has to be implemented now though.
I would think whether there is an immediate usecase to learn
br_stack->nr through an extra bpf helper call in every event.

When there is a use case for learning br_stack->nr,
there may have multiple ways to do it also,
this "flags" arg, or another helper,
or br_stack->nr may be read directly with the help of BTF.
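
As a sketch, the BTF route might look like the below (this assumes the
program can treat its context as the kernel-side struct and that these
field names stay stable, which is exactly the weaker guarantee at play):

	struct bpf_perf_event_data_kern *kctx =
		(struct bpf_perf_event_data_kern *)ctx;
	struct perf_sample_data *data;
	struct perf_branch_stack *br_stack;
	__u64 nr = 0;

	/* Chase the pointers with bpf_probe_read(); each hop can fail,
	 * in which case the destination is left zeroed.
	 */
	bpf_probe_read(&data, sizeof(data), &kctx->data);
	bpf_probe_read(&br_stack, sizeof(br_stack), &data->br_stack);
	if (br_stack)
		bpf_probe_read(&nr, sizeof(nr), &br_stack->nr);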
Daniel Xu Jan. 24, 2020, 8:28 p.m. UTC | #5
On Fri Jan 24, 2020 at 8:25 AM, Martin Lau wrote:
> On Thu, Jan 23, 2020 at 06:02:58PM -0800, Daniel Xu wrote:
> > On Thu Jan 23, 2020 at 4:49 PM, John Fastabend wrote:
> > [...]
> > > >   * function eBPF program intends to call
> > > > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > > > index 19e793aa441a..24c51272a1f7 100644
> > > > --- a/kernel/trace/bpf_trace.c
> > > > +++ b/kernel/trace/bpf_trace.c
> > > > @@ -1028,6 +1028,35 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
> > > >           .arg3_type      = ARG_CONST_SIZE,
> > > >  };
> > > >  
> > > > +BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
> > > > +	   void *, buf, u32, size)
> > > > +{
> > > > +	struct perf_branch_stack *br_stack = ctx->data->br_stack;
> > > > +	u32 to_copy = 0, to_clear = size;
> > > > +	int err = -EINVAL;
> > > > +
> > > > +	if (unlikely(!br_stack))
> > > > +		goto clear;
> > > > +
> > > > +	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
> > > > +	to_clear -= to_copy;
> > > > +
> > > > +	memcpy(buf, br_stack->entries, to_copy);
> > > > +	err = to_copy;
> > > > +clear:
> > >
> > > 
> > > There appears to be agreement to clear the extra buffer on error, but what
> > > about in the non-error case? I expect one usage pattern is to submit a
> > > fairly large buffer, large enough to handle the worst-case nr; in that case
> > > we end up zeroing memory even in the successful case. Can we skip the clear
> > > there? Maybe it's not too important either way, but it seems unnecessary.
> After some thought, I also think clearing in the non-error case
> is not ideal. Daniel, is it the common use case to always have a
> buf size large enough to capture the data of interest?

Yeah, ideally you want all of the data. But since branch data is already
sampled, it's not too bad to lose some events as long as they're
consistently lost (e.g. only ever keeping 4 branch entries).

I personally don't have strong opinions about clearing the unused buffer
on success. However, the internal documentation does say the helper must
fill the entire buffer, regardless of success. From this conversation it
seems there's no functional difference.

>
> 
> > >
> > > 
> > > > +	memset(buf + to_copy, 0, to_clear);
> > > > +	return err;
> > > > +}
> > >
> > 
> > Given Yonghong's suggestion of a flag argument, we need to allow users
> > to pass in a null ptr when querying the buffer size. So I'll change the `buf`
> > argument to ARG_PTR_TO_MEM_OR_NULL, which requires the buffer to be
> > initialized. We can then skip zeroing out altogether.
> > 
> > Although I think the end result is the same -- now the user has to zero it
> > out. Unfortunately, ARG_PTR_TO_UNINITIALIZED_MEM_OR_NULL is not
> > implemented yet.
> A "flags" arg can be added but not used to keep our option open in the
> future. Not sure it has to be implemented now though.
> I would think whether there is an immediate usecase to learn
> br_stack->nr through an extra bpf helper call in every event.
>
> 
> When there is a use case for learning br_stack->nr,
> there may have multiple ways to do it also,
> this "flags" arg, or another helper,
> or br_stack->nr may be read directly with the help of BTF.

I thought about this more, and I think one use case could be to figure
out how many branch entries you failed to collect and report that to
userspace for aggregation later. I agree there are multiple ways to do
it, but they would all require another helper call.

Since right now you have to statically define your buffer size, it's
quite easy to lose entries. So for completeness of the API, it would be
good to know how much data you lost.

Doing it via a flag seems fairly natural to me. Another helper seems a
little overkill. BTF could work, but it's a weaker API guarantee than
a helper (i.e. field names can change).
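
Concretely, with the hypothetical flag sketched earlier in the thread,
the accounting might look like this (GET_BR_STACK_NR, entries, and
lost_entries are all illustrative names, not existing API):

	__u64 nr = 0, copied;
	int written;

	/* How many records did the hardware capture? */
	bpf_perf_prog_read_branches(ctx, &nr, sizeof(nr), GET_BR_STACK_NR);

	/* How many did we actually have room for? */
	written = bpf_perf_prog_read_branches(ctx, entries,
					      sizeof(entries), 0);
	if (written >= 0) {
		copied = written / sizeof(struct perf_branch_entry);
		if (nr > copied)
			lost_entries += nr - copied;
	}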

Patch

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f1d74a2bd234..50c580c8a201 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2892,6 +2892,18 @@  union bpf_attr {
  *		Obtain the 64bit jiffies
  *	Return
  *		The 64 bit jiffies
+ *
+ * int bpf_perf_prog_read_branches(struct bpf_perf_event_data *ctx, void *buf, u32 buf_size)
+ *	Description
+ *		For en eBPF program attached to a perf event, retrieve the
+ *		branch records (struct perf_branch_entry) associated to *ctx*
+ *		and store it in	the buffer pointed by *buf* up to size
+ *		*buf_size* bytes.
+ *
+ *		Any unused parts of *buf* will be filled with zeros.
+ *	Return
+ *		On success, number of bytes written to *buf*. On error, a
+ *		negative value.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3012,7 +3024,8 @@  union bpf_attr {
 	FN(probe_read_kernel_str),	\
 	FN(tcp_send_ack),		\
 	FN(send_signal_thread),		\
-	FN(jiffies64),
+	FN(jiffies64),			\
+	FN(perf_prog_read_branches),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 19e793aa441a..24c51272a1f7 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1028,6 +1028,35 @@  static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
          .arg3_type      = ARG_CONST_SIZE,
 };
 
+BPF_CALL_3(bpf_perf_prog_read_branches, struct bpf_perf_event_data_kern *, ctx,
+	   void *, buf, u32, size)
+{
+	struct perf_branch_stack *br_stack = ctx->data->br_stack;
+	u32 to_copy = 0, to_clear = size;
+	int err = -EINVAL;
+
+	if (unlikely(!br_stack))
+		goto clear;
+
+	to_copy = min_t(u32, br_stack->nr * sizeof(struct perf_branch_entry), size);
+	to_clear -= to_copy;
+
+	memcpy(buf, br_stack->entries, to_copy);
+	err = to_copy;
+clear:
+	memset(buf + to_copy, 0, to_clear);
+	return err;
+}
+
+static const struct bpf_func_proto bpf_perf_prog_read_branches_proto = {
+         .func           = bpf_perf_prog_read_branches,
+         .gpl_only       = true,
+         .ret_type       = RET_INTEGER,
+         .arg1_type      = ARG_PTR_TO_CTX,
+         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
+         .arg3_type      = ARG_CONST_SIZE,
+};
+
 static const struct bpf_func_proto *
 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1040,6 +1069,8 @@  pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_stack_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
 		return &bpf_perf_prog_read_value_proto;
+	case BPF_FUNC_perf_prog_read_branches:
+		return &bpf_perf_prog_read_branches_proto;
 	default:
 		return tracing_func_proto(func_id, prog);
 	}