[v2,bpf-next,1/2] bpf: separate bpf_get_[stack|stackid] for perf events BPF

Message ID 20200715052601.2404533-2-songliubraving@fb.com
State Changes Requested
Delegated to: BPF Maintainers
Series bpf: fix stackmap on perf_events with PEBS

Commit Message

Song Liu July 15, 2020, 5:26 a.m. UTC
Calling get_perf_callchain() on perf_events from PEBS entries may cause
unwinder errors. To fix this issue, the callchain is fetched early. Such
perf_events are marked with __PERF_SAMPLE_CALLCHAIN_EARLY.

Similarly, calling bpf_get_[stack|stackid] on perf_events from PEBS may
also cause unwinder errors. To fix this, add separate versions of these
two helpers, bpf_get_[stack|stackid]_pe. These two helpers use the callchain
in bpf_perf_event_data_kern->data->callchain.

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 include/linux/bpf.h      |   2 +
 kernel/bpf/stackmap.c    | 204 +++++++++++++++++++++++++++++++++++----
 kernel/trace/bpf_trace.c |   4 +-
 3 files changed, 190 insertions(+), 20 deletions(-)
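
For context, a minimal sketch of the kind of program these helpers back: a
BPF_PROG_TYPE_PERF_EVENT program attached to a PEBS event, calling
bpf_get_stackid() on its context. The map layout, names and section below are
illustrative only, not taken from this series; the point is that such calls
are what pe_prog_func_proto() now routes to the new *_pe helpers.

	// SPDX-License-Identifier: GPL-2.0
	/* Illustrative only; nothing here is part of the patch itself. */
	#include <linux/bpf.h>
	#include <linux/bpf_perf_event.h>
	#include <linux/perf_event.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
		__uint(max_entries, 1024);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
	} stackmap SEC(".maps");

	SEC("perf_event")
	int sample_stack(struct bpf_perf_event_data *ctx)
	{
		/* For a PEBS event (attr.precise_ip > 0) that samples callchains,
		 * the callchain is collected early and the event is marked with
		 * __PERF_SAMPLE_CALLCHAIN_EARLY; bpf_get_stackid_pe() then reuses
		 * ctx->data->callchain instead of unwinding again.
		 */
		long id = bpf_get_stackid(ctx, &stackmap, 0);

		if (id < 0)
			return 0;
		/* ... count samples per stack id, etc. ... */
		return 0;
	}

	char _license[] SEC("license") = "GPL";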

Comments

Andrii Nakryiko July 16, 2020, 5:55 a.m. UTC | #1
On Tue, Jul 14, 2020 at 11:08 PM Song Liu <songliubraving@fb.com> wrote:
>
> Calling get_perf_callchain() on perf_events from PEBS entries may cause
> unwinder errors. To fix this issue, the callchain is fetched early. Such
> perf_events are marked with __PERF_SAMPLE_CALLCHAIN_EARLY.
>
> Similarly, calling bpf_get_[stack|stackid] on perf_events from PEBS may
> also cause unwinder errors. To fix this, add separate versions of these
> two helpers, bpf_get_[stack|stackid]_pe. These two helpers use the callchain
> in bpf_perf_event_data_kern->data->callchain.
>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
>  include/linux/bpf.h      |   2 +
>  kernel/bpf/stackmap.c    | 204 +++++++++++++++++++++++++++++++++++----
>  kernel/trace/bpf_trace.c |   4 +-
>  3 files changed, 190 insertions(+), 20 deletions(-)
>

Glad this approach worked out! A few minor bugs below, though.

[...]

> +       if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
> +                              BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
> +               return -EINVAL;
> +
> +       user = flags & BPF_F_USER_STACK;
> +       kernel = !user;
> +
> +       has_kernel = !event->attr.exclude_callchain_kernel;
> +       has_user = !event->attr.exclude_callchain_user;
> +
> +       if ((kernel && !has_kernel) || (user && !has_user))
> +               return -EINVAL;
> +
> +       trace = ctx->data->callchain;
> +       if (!trace || (!has_kernel && !has_user))

(!has_kernel && !has_user) can never happen: the check just above guarantees
that either kernel or user is true, and whichever one is true implies the
corresponding has_kernel/has_user is also true.
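
In other words, only the NULL check is really needed there; a possible
simplification (untested sketch, not part of the patch):

	trace = ctx->data->callchain;
	if (!trace)
		return -EFAULT;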

> +               return -EFAULT;
> +
> +       if (has_kernel && has_user) {
> +               __u64 nr_kernel = count_kernel_ip(trace);
> +               int ret;
> +
> +               if (kernel) {
> +                       __u64 nr = trace->nr;
> +
> +                       trace->nr = nr_kernel;
> +                       ret = __bpf_get_stackid(map, trace, flags);
> +
> +                       /* restore nr */
> +                       trace->nr = nr;
> +               } else { /* user */
> +                       u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
> +
> +                       skip += nr_kernel;
> +                       if (skip > ~BPF_F_SKIP_FIELD_MASK)

Something looks fishy here: ~BPF_F_SKIP_FIELD_MASK is a huge number. Did you
mean to check that skip is not bigger than 255, i.e., that it still fits
within BPF_F_SKIP_FIELD_MASK?
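
Presumably something like this was intended (untested sketch):

	skip += nr_kernel;
	if (skip > BPF_F_SKIP_FIELD_MASK)	/* mask is 0xff, so skip must fit in 8 bits */
		return -EFAULT;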

> +                               return -EFAULT;
> +
> +                       flags = (flags & ~BPF_F_SKIP_FIELD_MASK) |
> +                               (skip  & BPF_F_SKIP_FIELD_MASK);
> +                       ret = __bpf_get_stackid(map, trace, flags);
> +               }
> +               return ret;
> +       }
> +       return __bpf_get_stackid(map, trace, flags);
> +}
> +

[...]

> +
> +       has_kernel = !event->attr.exclude_callchain_kernel;
> +       has_user = !event->attr.exclude_callchain_user;
> +
> +       if ((kernel && !has_kernel) || (user && !has_user))
> +               goto clear;
> +
> +       err = -EFAULT;
> +       trace = ctx->data->callchain;
> +       if (!trace || (!has_kernel && !has_user))
> +               goto clear;

Same as above for bpf_get_stackid; this can probably be simplified.

> +
> +       if (has_kernel && has_user) {
> +               __u64 nr_kernel = count_kernel_ip(trace);
> +               int ret;
> +
> +               if (kernel) {
> +                       __u64 nr = trace->nr;
> +
> +                       trace->nr = nr_kernel;
> +                       ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
> +                                             size, flags);
> +
> +                       /* restore nr */
> +                       trace->nr = nr;
> +               } else { /* user */
> +                       u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
> +
> +                       skip += nr_kernel;
> +                       if (skip > ~BPF_F_SKIP_FIELD_MASK)
> +                               goto clear;
> +

and here

> +                       flags = (flags & ~BPF_F_SKIP_FIELD_MASK) |
> +                               (skip  & BPF_F_SKIP_FIELD_MASK);

Actually, if you check that skip <= BPF_F_SKIP_FIELD_MASK, you don't need to
mask it here; just `| skip` is enough.
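
That is, once skip is known to fit in the mask, the update could read
(untested sketch):

	flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;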

> +                       ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
> +                                             size, flags);
> +               }
> +               return ret;
> +       }
> +       return __bpf_get_stack(ctx->regs, NULL, trace, buf, size, flags);
> +clear:
> +       memset(buf, 0, size);
> +       return err;
> +
> +}
> +

[...]
kernel test robot July 17, 2020, 1:07 a.m. UTC | #2
Hi Song,

I love your patch! Yet something to improve:

[auto build test ERROR on bpf-next/master]

url:    https://github.com/0day-ci/linux/commits/Song-Liu/bpf-fix-stackmap-on-perf_events-with-PEBS/20200715-133118
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
config: arm64-randconfig-r004-20200716 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project ed6b578040a85977026c93bf4188f996148f3218)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> kernel/bpf/stackmap.c:698:26: error: incompatible pointer types passing 'bpf_user_pt_regs_t *' (aka 'struct user_pt_regs *') to parameter of type 'struct pt_regs *' [-Werror,-Wincompatible-pointer-types]
                   return __bpf_get_stack(ctx->regs, NULL, NULL, buf, size, flags);
                                          ^~~~~~~~~
   kernel/bpf/stackmap.c:581:45: note: passing argument to parameter 'regs' here
   static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                                               ^
   kernel/bpf/stackmap.c:726:26: error: incompatible pointer types passing 'bpf_user_pt_regs_t *' (aka 'struct user_pt_regs *') to parameter of type 'struct pt_regs *' [-Werror,-Wincompatible-pointer-types]
                           ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
                                                 ^~~~~~~~~
   kernel/bpf/stackmap.c:581:45: note: passing argument to parameter 'regs' here
   static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                                               ^
   kernel/bpf/stackmap.c:740:26: error: incompatible pointer types passing 'bpf_user_pt_regs_t *' (aka 'struct user_pt_regs *') to parameter of type 'struct pt_regs *' [-Werror,-Wincompatible-pointer-types]
                           ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
                                                 ^~~~~~~~~
   kernel/bpf/stackmap.c:581:45: note: passing argument to parameter 'regs' here
   static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                                               ^
   kernel/bpf/stackmap.c:745:25: error: incompatible pointer types passing 'bpf_user_pt_regs_t *' (aka 'struct user_pt_regs *') to parameter of type 'struct pt_regs *' [-Werror,-Wincompatible-pointer-types]
           return __bpf_get_stack(ctx->regs, NULL, trace, buf, size, flags);
                                  ^~~~~~~~~
   kernel/bpf/stackmap.c:581:45: note: passing argument to parameter 'regs' here
   static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                                               ^
   4 errors generated.

vim +698 kernel/bpf/stackmap.c

   687	
   688	BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
   689		   void *, buf, u32, size, u64, flags)
   690	{
   691		struct perf_event *event = ctx->event;
   692		struct perf_callchain_entry *trace;
   693		bool has_kernel, has_user;
   694		bool kernel, user;
   695		int err = -EINVAL;
   696	
   697		if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
 > 698			return __bpf_get_stack(ctx->regs, NULL, NULL, buf, size, flags);
   699	
   700		if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
   701				       BPF_F_USER_BUILD_ID)))
   702			goto clear;
   703	
   704		user = flags & BPF_F_USER_STACK;
   705		kernel = !user;
   706	
   707		has_kernel = !event->attr.exclude_callchain_kernel;
   708		has_user = !event->attr.exclude_callchain_user;
   709	
   710		if ((kernel && !has_kernel) || (user && !has_user))
   711			goto clear;
   712	
   713		err = -EFAULT;
   714		trace = ctx->data->callchain;
   715		if (!trace || (!has_kernel && !has_user))
   716			goto clear;
   717	
   718		if (has_kernel && has_user) {
   719			__u64 nr_kernel = count_kernel_ip(trace);
   720			int ret;
   721	
   722			if (kernel) {
   723				__u64 nr = trace->nr;
   724	
   725				trace->nr = nr_kernel;
   726				ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
   727						      size, flags);
   728	
   729				/* restore nr */
   730				trace->nr = nr;
   731			} else { /* user */
   732				u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
   733	
   734				skip += nr_kernel;
   735				if (skip > ~BPF_F_SKIP_FIELD_MASK)
   736					goto clear;
   737	
   738				flags = (flags & ~BPF_F_SKIP_FIELD_MASK) |
   739					(skip  & BPF_F_SKIP_FIELD_MASK);
   740				ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
   741						      size, flags);
   742			}
   743			return ret;
   744		}
   745		return __bpf_get_stack(ctx->regs, NULL, trace, buf, size, flags);
   746	clear:
   747		memset(buf, 0, size);
   748		return err;
   749	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
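
The errors above are arm64-specific: bpf_perf_event_data_kern::regs is a
bpf_user_pt_regs_t *, and that typedef is only an alias for struct pt_regs on
some architectures. Roughly, from the per-arch uapi headers:

	/* arch/x86/include/uapi/asm/bpf_perf_event.h */
	typedef struct pt_regs bpf_user_pt_regs_t;

	/* arch/arm64/include/uapi/asm/bpf_perf_event.h */
	typedef struct user_pt_regs bpf_user_pt_regs_t;

so on arm64, ctx->regs cannot be passed directly to __bpf_get_stack(), which
takes a struct pt_regs *.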

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c67c88ad35f85..bfc7a283c0f93 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1637,6 +1637,8 @@  extern const struct bpf_func_proto bpf_get_current_comm_proto;
 extern const struct bpf_func_proto bpf_get_stackid_proto;
 extern const struct bpf_func_proto bpf_get_stack_proto;
 extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
+extern const struct bpf_func_proto bpf_get_stack_proto_pe;
 extern const struct bpf_func_proto bpf_sock_map_update_proto;
 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 48d8e739975fa..0587d4ddb06ce 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -4,6 +4,7 @@ 
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
+#include <linux/kernel.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
 #include <linux/elf.h>
@@ -387,11 +388,10 @@  get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
 #endif
 }
 
-BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
-	   u64, flags)
+static long __bpf_get_stackid(struct bpf_map *map,
+			      struct perf_callchain_entry *trace, u64 flags)
 {
 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
-	struct perf_callchain_entry *trace;
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 	u32 max_depth = map->value_size / stack_map_data_size(map);
 	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
@@ -399,21 +399,9 @@  BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 	u32 hash, id, trace_nr, trace_len;
 	bool user = flags & BPF_F_USER_STACK;
-	bool kernel = !user;
 	u64 *ips;
 	bool hash_matches;
 
-	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
-			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
-		return -EINVAL;
-
-	trace = get_perf_callchain(regs, init_nr, kernel, user,
-				   sysctl_perf_event_max_stack, false, false);
-
-	if (unlikely(!trace))
-		/* couldn't fetch the stack trace */
-		return -EFAULT;
-
 	/* get_perf_callchain() guarantees that trace->nr >= init_nr
 	 * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
 	 */
@@ -478,6 +466,30 @@  BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	return id;
 }
 
+BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
+	   u64, flags)
+{
+	u32 max_depth = map->value_size / stack_map_data_size(map);
+	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
+	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
+	bool user = flags & BPF_F_USER_STACK;
+	struct perf_callchain_entry *trace;
+	bool kernel = !user;
+
+	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
+			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
+		return -EINVAL;
+
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
+
+	if (unlikely(!trace))
+		/* couldn't fetch the stack trace */
+		return -EFAULT;
+
+	return __bpf_get_stackid(map, trace, flags);
+}
+
 const struct bpf_func_proto bpf_get_stackid_proto = {
 	.func		= bpf_get_stackid,
 	.gpl_only	= true,
@@ -487,7 +499,87 @@  const struct bpf_func_proto bpf_get_stackid_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
+{
+	__u64 nr_kernel = 0;
+
+	while (nr_kernel < trace->nr) {
+		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
+			break;
+		nr_kernel++;
+	}
+	return nr_kernel;
+}
+
+BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
+	   struct bpf_map *, map, u64, flags)
+{
+	struct perf_event *event = ctx->event;
+	struct perf_callchain_entry *trace;
+	bool has_kernel, has_user;
+	bool kernel, user;
+
+	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
+	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+		return bpf_get_stackid((unsigned long)(ctx->regs),
+				       (unsigned long) map, flags, 0, 0);
+
+	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
+			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
+		return -EINVAL;
+
+	user = flags & BPF_F_USER_STACK;
+	kernel = !user;
+
+	has_kernel = !event->attr.exclude_callchain_kernel;
+	has_user = !event->attr.exclude_callchain_user;
+
+	if ((kernel && !has_kernel) || (user && !has_user))
+		return -EINVAL;
+
+	trace = ctx->data->callchain;
+	if (!trace || (!has_kernel && !has_user))
+		return -EFAULT;
+
+	if (has_kernel && has_user) {
+		__u64 nr_kernel = count_kernel_ip(trace);
+		int ret;
+
+		if (kernel) {
+			__u64 nr = trace->nr;
+
+			trace->nr = nr_kernel;
+			ret = __bpf_get_stackid(map, trace, flags);
+
+			/* restore nr */
+			trace->nr = nr;
+		} else { /* user */
+			u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
+
+			skip += nr_kernel;
+			if (skip > ~BPF_F_SKIP_FIELD_MASK)
+				return -EFAULT;
+
+			flags = (flags & ~BPF_F_SKIP_FIELD_MASK) |
+				(skip  & BPF_F_SKIP_FIELD_MASK);
+			ret = __bpf_get_stackid(map, trace, flags);
+		}
+		return ret;
+	}
+	return __bpf_get_stackid(map, trace, flags);
+}
+
+const struct bpf_func_proto bpf_get_stackid_proto_pe = {
+	.func		= bpf_get_stackid_pe,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+			    struct perf_callchain_entry *trace_in,
 			    void *buf, u32 size, u64 flags)
 {
 	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
@@ -520,7 +612,9 @@  static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	else
 		init_nr = sysctl_perf_event_max_stack - num_elem;
 
-	if (kernel && task)
+	if (trace_in)
+		trace = trace_in;
+	else if (kernel && task)
 		trace = get_callchain_entry_for_task(task, init_nr);
 	else
 		trace = get_perf_callchain(regs, init_nr, kernel, user,
@@ -556,7 +650,7 @@  static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
 	   u64, flags)
 {
-	return __bpf_get_stack(regs, NULL, buf, size, flags);
+	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
 }
 
 const struct bpf_func_proto bpf_get_stack_proto = {
@@ -574,7 +668,7 @@  BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
 {
 	struct pt_regs *regs = task_pt_regs(task);
 
-	return __bpf_get_stack(regs, task, buf, size, flags);
+	return __bpf_get_stack(regs, task, NULL, buf, size, flags);
 }
 
 BTF_ID_LIST(bpf_get_task_stack_btf_ids)
@@ -591,6 +685,80 @@  const struct bpf_func_proto bpf_get_task_stack_proto = {
 	.btf_id		= bpf_get_task_stack_btf_ids,
 };
 
+BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
+	   void *, buf, u32, size, u64, flags)
+{
+	struct perf_event *event = ctx->event;
+	struct perf_callchain_entry *trace;
+	bool has_kernel, has_user;
+	bool kernel, user;
+	int err = -EINVAL;
+
+	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+		return __bpf_get_stack(ctx->regs, NULL, NULL, buf, size, flags);
+
+	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
+			       BPF_F_USER_BUILD_ID)))
+		goto clear;
+
+	user = flags & BPF_F_USER_STACK;
+	kernel = !user;
+
+	has_kernel = !event->attr.exclude_callchain_kernel;
+	has_user = !event->attr.exclude_callchain_user;
+
+	if ((kernel && !has_kernel) || (user && !has_user))
+		goto clear;
+
+	err = -EFAULT;
+	trace = ctx->data->callchain;
+	if (!trace || (!has_kernel && !has_user))
+		goto clear;
+
+	if (has_kernel && has_user) {
+		__u64 nr_kernel = count_kernel_ip(trace);
+		int ret;
+
+		if (kernel) {
+			__u64 nr = trace->nr;
+
+			trace->nr = nr_kernel;
+			ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
+					      size, flags);
+
+			/* restore nr */
+			trace->nr = nr;
+		} else { /* user */
+			u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
+
+			skip += nr_kernel;
+			if (skip > ~BPF_F_SKIP_FIELD_MASK)
+				goto clear;
+
+			flags = (flags & ~BPF_F_SKIP_FIELD_MASK) |
+				(skip  & BPF_F_SKIP_FIELD_MASK);
+			ret = __bpf_get_stack(ctx->regs, NULL, trace, buf,
+					      size, flags);
+		}
+		return ret;
+	}
+	return __bpf_get_stack(ctx->regs, NULL, trace, buf, size, flags);
+clear:
+	memset(buf, 0, size);
+	return err;
+
+}
+
+const struct bpf_func_proto bpf_get_stack_proto_pe = {
+	.func		= bpf_get_stack_pe,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 /* Called from eBPF program */
 static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 {
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3cc0dcb60ca20..cb91ef902cc43 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1411,9 +1411,9 @@  pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_perf_event_output:
 		return &bpf_perf_event_output_proto_tp;
 	case BPF_FUNC_get_stackid:
-		return &bpf_get_stackid_proto_tp;
+		return &bpf_get_stackid_proto_pe;
 	case BPF_FUNC_get_stack:
-		return &bpf_get_stack_proto_tp;
+		return &bpf_get_stack_proto_pe;
 	case BPF_FUNC_perf_prog_read_value:
 		return &bpf_perf_prog_read_value_proto;
 	case BPF_FUNC_read_branch_records: