
[bpf-next,v5,2/5] bpf: extend bpf_base_func_proto helpers with probe_* and *current_task*

Message ID: 159033905529.12355.4368381069655254932.stgit@john-Precision-5820-Tower
State: Changes Requested
Delegated to: BPF Maintainers
Series: bpf: Add sk_msg and networking helpers

Commit Message

John Fastabend May 24, 2020, 4:50 p.m. UTC
Often it is useful when applying policy to know something about the
task. If the administrator has CAP_SYS_ADMIN rights then they can
use kprobe + networking hook and link the two programs together to
accomplish this. However, this is a bit clunky and also means we have
to call both the network program and kprobe program when we could just
use a single program and avoid passing metadata through sk_msg/skb->cb,
socket, maps, etc.

To accomplish this add probe_* helpers to bpf_base_func_proto programs
guarded by a perfmon_capable() check. New supported helpers are the
following,

 BPF_FUNC_get_current_task
 BPF_FUNC_current_task_under_cgroup
 BPF_FUNC_probe_read_user
 BPF_FUNC_probe_read_kernel
 BPF_FUNC_probe_read_user_str
 BPF_FUNC_probe_read_kernel_str

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Yonghong Song <yhs@fb.com>
---
 kernel/bpf/helpers.c     |   24 ++++++++++++++++++++++++
 kernel/trace/bpf_trace.c |   10 +++++-----
 2 files changed, 29 insertions(+), 5 deletions(-)
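
For illustration only (not part of this patch or thread): a minimal sk_msg
sketch of the single-program approach described above. It assumes a CO-RE
build with a bpftool-generated vmlinux.h and libbpf's bpf_helpers.h; the
program name and the "curl" comm check are made-up examples.

// SPDX-License-Identifier: GPL-2.0
/* msg_policy.bpf.c -- hypothetical example, not kernel code. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_policy(struct sk_msg_md *msg)
{
	struct task_struct *task;
	const char blocked[] = "curl";
	char comm[16];
	int i;

	/* Read task state directly from the networking hook; after this
	 * patch both helpers are handed out by bpf_base_func_proto() and
	 * require perfmon_capable() at load time.
	 */
	task = (struct task_struct *)bpf_get_current_task();
	if (bpf_probe_read_kernel_str(comm, sizeof(comm), &task->comm) < 0)
		return SK_PASS;

	/* Hypothetical policy: drop msgs sent by a blocked binary. */
	for (i = 0; i < sizeof(blocked); i++) {
		if (comm[i] != blocked[i])
			return SK_PASS;
	}
	return SK_DROP;
}

char _license[] SEC("license") = "GPL";

Note the GPL license string: these protos are gpl_only, and the loading task
additionally needs CAP_PERFMON (or CAP_SYS_ADMIN) for the helpers to resolve
during verification.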

Comments

Daniel Borkmann May 25, 2020, 9:52 p.m. UTC | #1
On 5/24/20 6:50 PM, John Fastabend wrote:
> Often it is useful when applying policy to know something about the
> task. If the administrator has CAP_SYS_ADMIN rights then they can
> use kprobe + networking hook and link the two programs together to
> accomplish this. However, this is a bit clunky and also means we have
> to call both the network program and kprobe program when we could just
> use a single program and avoid passing metadata through sk_msg/skb->cb,
> socket, maps, etc.
> 
> To accomplish this add probe_* helpers to bpf_base_func_proto programs
> guarded by a perfmon_capable() check. New supported helpers are the
> following,
> 
>   BPF_FUNC_get_current_task
>   BPF_FUNC_current_task_under_cgroup

Nit: Stale commit message?

>   BPF_FUNC_probe_read_user
>   BPF_FUNC_probe_read_kernel
>   BPF_FUNC_probe_read_user_str
>   BPF_FUNC_probe_read_kernel_str
> 
> Signed-off-by: John Fastabend <john.fastabend@gmail.com>
> Acked-by: Yonghong Song <yhs@fb.com>
> ---

[...]

John Fastabend May 25, 2020, 10:58 p.m. UTC | #2
Daniel Borkmann wrote:
> On 5/24/20 6:50 PM, John Fastabend wrote:
> > Often it is useful when applying policy to know something about the
> > task. If the administrator has CAP_SYS_ADMIN rights then they can
> > use kprobe + networking hook and link the two programs together to
> > accomplish this. However, this is a bit clunky and also means we have
> > to call both the network program and kprobe program when we could just
> > use a single program and avoid passing metadata through sk_msg/skb->cb,
> > socket, maps, etc.
> > 
> > To accomplish this add probe_* helpers to bpf_base_func_proto programs
> > guarded by a perfmon_capable() check. New supported helpers are the
> > following,
> > 
> >   BPF_FUNC_get_current_task
> >   BPF_FUNC_current_task_under_cgroup
> 
> Nit: Stale commit message?
> 

Correct, stale commit.

> >   BPF_FUNC_probe_read_user
> >   BPF_FUNC_probe_read_kernel
> >   BPF_FUNC_probe_read_user_str
> >   BPF_FUNC_probe_read_kernel_str
> > 
> > Signed-off-by: John Fastabend <john.fastabend@gmail.com>
> > Acked-by: Yonghong Song <yhs@fb.com>
> > ---

[...]
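
Before the patch itself, one detail worth noting for readers unfamiliar with
the __weak declarations it adds to kernel/bpf/helpers.c: they are
zero-initialized fallbacks so that helpers.c still links when
kernel/trace/bpf_trace.c (which holds the real definitions) is not built in;
in that case the returned proto has .func == NULL and the verifier rejects
any program calling the helper. A standalone userspace sketch of the same
linker behavior (hypothetical file, not kernel code):

/* weak_demo.c -- build with just "cc weak_demo.c"; the weak,
 * zero-initialized definition is used because no strong definition
 * is linked in, so .func is NULL.
 */
#include <stdio.h>

struct func_proto {
	long (*func)(void);
};

/* Weak fallback, analogous to the __weak protos in kernel/bpf/helpers.c. */
const struct func_proto get_current_task_proto __attribute__((weak));

/* A strong definition in another translation unit would override it, e.g.:
 *
 *   static long get_current_task(void) { return 42; }
 *   const struct func_proto get_current_task_proto = {
 *           .func = get_current_task,
 *   };
 */

int main(void)
{
	if (!get_current_task_proto.func) {
		puts("no strong definition linked in: helper unavailable");
		return 1;
	}
	printf("helper returned %ld\n", get_current_task_proto.func());
	return 0;
}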

Patch

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 886949f..bb4fb63 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -601,6 +601,12 @@ const struct bpf_func_proto bpf_event_output_data_proto =  {
 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 };
 
+const struct bpf_func_proto bpf_get_current_task_proto __weak;
+const struct bpf_func_proto bpf_probe_read_user_proto __weak;
+const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
+const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
+const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
+
 const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
 {
@@ -648,6 +654,24 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_jiffies64:
 		return &bpf_jiffies64_proto;
 	default:
+		break;
+	}
+
+	if (!perfmon_capable())
+		return NULL;
+
+	switch (func_id) {
+	case BPF_FUNC_get_current_task:
+		return &bpf_get_current_task_proto;
+	case BPF_FUNC_probe_read_user:
+		return &bpf_probe_read_user_proto;
+	case BPF_FUNC_probe_read_kernel:
+		return &bpf_probe_read_kernel_proto;
+	case BPF_FUNC_probe_read_user_str:
+		return &bpf_probe_read_user_str_proto;
+	case BPF_FUNC_probe_read_kernel_str:
+		return &bpf_probe_read_kernel_str_proto;
+	default:
 		return NULL;
 	}
 }
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9531f54..187cd69 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -147,7 +147,7 @@ BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
 	return ret;
 }
 
-static const struct bpf_func_proto bpf_probe_read_user_proto = {
+const struct bpf_func_proto bpf_probe_read_user_proto = {
 	.func		= bpf_probe_read_user,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
@@ -167,7 +167,7 @@ BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
 	return ret;
 }
 
-static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 	.func		= bpf_probe_read_user_str,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
@@ -198,7 +198,7 @@ BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
 }
 
-static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 	.func		= bpf_probe_read_kernel,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
@@ -253,7 +253,7 @@ BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
 }
 
-static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
+const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
 	.func		= bpf_probe_read_kernel_str,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
@@ -907,7 +907,7 @@ BPF_CALL_0(bpf_get_current_task)
 	return (long) current;
 }
 
-static const struct bpf_func_proto bpf_get_current_task_proto = {
+const struct bpf_func_proto bpf_get_current_task_proto = {
 	.func		= bpf_get_current_task,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
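
For completeness, a sketch of the userspace side (hypothetical file and
object names, assuming libbpf 1.0+ error conventions): because the new
helpers are handed out by bpf_base_func_proto() only when perfmon_capable()
is true, the program must be loaded by a task holding CAP_PERFMON (or
CAP_SYS_ADMIN), in addition to whatever capabilities the program type
already requires; otherwise verification fails, typically with an
"unknown func" error in the verifier log.

/* loader.c -- illustrative only, not part of this series. */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("msg_policy.bpf.o", NULL);
	if (!obj) {
		fprintf(stderr, "failed to open BPF object\n");
		return 1;
	}

	err = bpf_object__load(obj);
	if (err) {
		/* Without CAP_PERFMON the new protos are not returned by
		 * bpf_base_func_proto() and the verifier rejects the program.
		 */
		fprintf(stderr, "load failed: %d (missing CAP_PERFMON?)\n", err);
		bpf_object__close(obj);
		return 1;
	}

	puts("loaded; attach the sk_msg program to a sockmap as usual");
	bpf_object__close(obj);
	return 0;
}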