
[v3,2/6] cgroup: add support for eBPF programs

Message ID 1472241532-11682-3-git-send-email-daniel@zonque.org
State Changes Requested, archived
Delegated to: David Miller

Commit Message

Daniel Mack Aug. 26, 2016, 7:58 p.m. UTC
This patch adds two sets of eBPF program pointers to struct cgroup:
one for programs that are directly pinned to a cgroup, and one for
programs that are effective for it.

To illustrate the logic behind that, assume the following example
cgroup hierarchy.

  A - B - C
        \ D - E

If only B has a program attached, it will be effective for B, C, D
and E. If D then attaches a program itself, that will be effective for
both D and E, and the program in B will only affect B and C. Only one
program of a given type is effective for a cgroup.
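
To make that concrete, the per-cgroup state for one attach type then
looks as follows (progB and progD denoting the programs attached to B
and D):

  cgroup    prog      effective
  A         NULL      NULL
  B         progB     progB
  C         NULL      progB
  D         progD     progD
  E         NULL      progD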

Attaching and detaching programs will be done through the bpf(2)
syscall. For now, ingress and egress inet socket filtering are the
only supported use-cases.
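
As an illustration, attaching a program from userspace could then look
roughly like this (a sketch; the BPF_PROG_ATTACH command and the attr
field names are introduced in a later patch of this series and may
differ in detail):

  #include <linux/bpf.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int attach_prog(int cgroup_fd, int prog_fd)
  {
          union bpf_attr attr = {};

          attr.target_fd = cgroup_fd;    /* fd of the cgroup directory */
          attr.attach_bpf_fd = prog_fd;  /* fd from BPF_PROG_LOAD */
          attr.attach_type = BPF_CGROUP_INET_INGRESS;

          return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
  }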

Signed-off-by: Daniel Mack <daniel@zonque.org>
---
 include/linux/bpf-cgroup.h  |  70 +++++++++++++++++++
 include/linux/cgroup-defs.h |   4 ++
 init/Kconfig                |  12 ++++
 kernel/bpf/Makefile         |   1 +
 kernel/bpf/cgroup.c         | 165 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/cgroup.c             |  18 +++++
 6 files changed, 270 insertions(+)
 create mode 100644 include/linux/bpf-cgroup.h
 create mode 100644 kernel/bpf/cgroup.c

Comments

Alexei Starovoitov Aug. 27, 2016, 12:03 a.m. UTC | #1
On Fri, Aug 26, 2016 at 09:58:48PM +0200, Daniel Mack wrote:
> This patch adds two sets of eBPF program pointers to struct cgroup.
> One for such that are directly pinned to a cgroup, and one for such
> that are effective for it.
> 
> To illustrate the logic behind that, assume the following example
> cgroup hierarchy.
> 
>   A - B - C
>         \ D - E
> 
> If only B has a program attached, it will be effective for B, C, D
> and E. If D then attaches a program itself, that will be effective for
> both D and E, and the program in B will only affect B and C. Only one
> program of a given type is effective for a cgroup.
> 
> Attaching and detaching programs will be done through the bpf(2)
> syscall. For now, ingress and egress inet socket filtering are the
> only supported use-cases.
> 
> Signed-off-by: Daniel Mack <daniel@zonque.org>
...
> +	css_for_each_descendant_pre(pos, &cgrp->self) {
> +		struct cgroup *desc = container_of(pos, struct cgroup, self);
> +
> +		/* skip the subtree if the descendant has its own program */
> +		if (desc->bpf.prog[type] && desc != cgrp)

is desc != cgrp really needed?
I thought css_for_each_descendant_pre() shouldn't walk itself
or I'm missing how it works.

> +			pos = css_rightmost_descendant(pos);
> +		else
> +			rcu_assign_pointer(desc->bpf.effective[type],
> +					   effective);
> +	}
> +}
> +
Daniel Borkmann Aug. 29, 2016, 10:42 p.m. UTC | #2
On 08/26/2016 09:58 PM, Daniel Mack wrote:
> This patch adds two sets of eBPF program pointers to struct cgroup.
> One for such that are directly pinned to a cgroup, and one for such
> that are effective for it.
>
> To illustrate the logic behind that, assume the following example
> cgroup hierarchy.
>
>    A - B - C
>          \ D - E
>
> If only B has a program attached, it will be effective for B, C, D
> and E. If D then attaches a program itself, that will be effective for
> both D and E, and the program in B will only affect B and C. Only one
> program of a given type is effective for a cgroup.
>
> Attaching and detaching programs will be done through the bpf(2)
> syscall. For now, ingress and egress inet socket filtering are the
> only supported use-cases.
>
> Signed-off-by: Daniel Mack <daniel@zonque.org>
[...]
> +void __cgroup_bpf_update(struct cgroup *cgrp,
> +			 struct cgroup *parent,
> +			 struct bpf_prog *prog,
> +			 enum bpf_attach_type type)
> +{
> +	struct bpf_prog *old_prog, *effective;
> +	struct cgroup_subsys_state *pos;
> +
> +	old_prog = xchg(cgrp->bpf.prog + type, prog);
> +
> +	if (prog)
> +		static_branch_inc(&cgroup_bpf_enabled_key);
> +
> +	if (old_prog) {
> +		bpf_prog_put(old_prog);
> +		static_branch_dec(&cgroup_bpf_enabled_key);
> +	}
> +
> +	effective = (!prog && parent) ?
> +		rcu_dereference_protected(parent->bpf.effective[type],
> +					  lockdep_is_held(&cgroup_mutex)) :
> +		prog;
> +
> +	css_for_each_descendant_pre(pos, &cgrp->self) {
> +		struct cgroup *desc = container_of(pos, struct cgroup, self);
> +
> +		/* skip the subtree if the descendant has its own program */
> +		if (desc->bpf.prog[type] && desc != cgrp)
> +			pos = css_rightmost_descendant(pos);
> +		else
> +			rcu_assign_pointer(desc->bpf.effective[type],
> +					   effective);
> +	}

Shouldn't the old_prog reference only be released right here at the end
instead of above (otherwise this could race)?

> +}
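
I.e., roughly the following reordering (just a sketch, everything else
unchanged):

	old_prog = xchg(cgrp->bpf.prog + type, prog);

	if (prog)
		static_branch_inc(&cgroup_bpf_enabled_key);

	effective = (!prog && parent) ?
		rcu_dereference_protected(parent->bpf.effective[type],
					  lockdep_is_held(&cgroup_mutex)) :
		prog;

	css_for_each_descendant_pre(pos, &cgrp->self) {
		struct cgroup *desc = container_of(pos, struct cgroup, self);

		/* skip the subtree if the descendant has its own program */
		if (desc->bpf.prog[type] && desc != cgrp)
			pos = css_rightmost_descendant(pos);
		else
			rcu_assign_pointer(desc->bpf.effective[type],
					   effective);
	}

	/* drop the reference only after no effective pointer can still
	 * lead readers to old_prog */
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}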
Sargun Dhillon Aug. 29, 2016, 11:04 p.m. UTC | #3
On Fri, Aug 26, 2016 at 09:58:48PM +0200, Daniel Mack wrote:
> This patch adds two sets of eBPF program pointers to struct cgroup.
> One for such that are directly pinned to a cgroup, and one for such
> that are effective for it.
> 
> To illustrate the logic behind that, assume the following example
> cgroup hierarchy.
> 
>   A - B - C
>         \ D - E
> 
> If only B has a program attached, it will be effective for B, C, D
> and E. If D then attaches a program itself, that will be effective for
> both D and E, and the program in B will only affect B and C. Only one
> program of a given type is effective for a cgroup.
> 
How does this work when running an orchestrator within an orchestrator? The
Docker in Docker / Mesos in Mesos use case, where the top-level orchestrator is
observing the traffic, and there is an orchestrator within it that also needs
to run its own filters.

In this case, I'd like to run E's filter, then, if it returns 0, D's, then
B's, and so on. Is it possible to allow this, either by flattening out the
data structure (copying a ref to the bpf programs to C and E) or something
similar?
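
Hypothetically, something like this (made-up sketch reusing the names
from this patch, inside __cgroup_bpf_run_filter()):

	struct cgroup *cgrp;

	for (cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); cgrp;
	     cgrp = cgroup_parent(cgrp)) {
		struct bpf_prog *prog = READ_ONCE(cgrp->bpf.prog[type]);

		if (prog && bpf_prog_run_clear_cb(prog, skb) != 1)
			return -EPERM;
	}
	return 0;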


> Attaching and detaching programs will be done through the bpf(2)
> syscall. For now, ingress and egress inet socket filtering are the
> only supported use-cases.
> 
> Signed-off-by: Daniel Mack <daniel@zonque.org>
> ---
>  include/linux/bpf-cgroup.h  |  70 +++++++++++++++++++
>  include/linux/cgroup-defs.h |   4 ++
>  init/Kconfig                |  12 ++++
>  kernel/bpf/Makefile         |   1 +
>  kernel/bpf/cgroup.c         | 165 ++++++++++++++++++++++++++++++++++++++++++++
>  kernel/cgroup.c             |  18 +++++
>  6 files changed, 270 insertions(+)
>  create mode 100644 include/linux/bpf-cgroup.h
>  create mode 100644 kernel/bpf/cgroup.c
> 
> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> new file mode 100644
> index 0000000..a5a25c1
> --- /dev/null
> +++ b/include/linux/bpf-cgroup.h
> @@ -0,0 +1,70 @@
> +#ifndef _BPF_CGROUP_H
> +#define _BPF_CGROUP_H
> +
> +#include <linux/bpf.h>
> +#include <uapi/linux/bpf.h>
> +
> +struct sock;
> +struct cgroup;
> +struct sk_buff;
> +
> +#ifdef CONFIG_CGROUP_BPF
> +
> +extern struct static_key_false cgroup_bpf_enabled_key;
> +#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
> +
> +struct cgroup_bpf {
> +	/*
> +	 * Store two sets of bpf_prog pointers, one for programs that are
> +	 * pinned directly to this cgroup, and one for those that are effective
> +	 * when this cgroup is accessed.
> +	 */
> +	struct bpf_prog *prog[__MAX_BPF_ATTACH_TYPE];
> +	struct bpf_prog *effective[__MAX_BPF_ATTACH_TYPE];
> +};
> +
> +void cgroup_bpf_put(struct cgroup *cgrp);
> +void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
> +
> +void __cgroup_bpf_update(struct cgroup *cgrp,
> +			 struct cgroup *parent,
> +			 struct bpf_prog *prog,
> +			 enum bpf_attach_type type);
> +
> +/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
> +void cgroup_bpf_update(struct cgroup *cgrp,
> +		       struct bpf_prog *prog,
> +		       enum bpf_attach_type type);
> +
> +int __cgroup_bpf_run_filter(struct sock *sk,
> +			    struct sk_buff *skb,
> +			    enum bpf_attach_type type);
> +
> +/* Wrapper for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled */
> +static inline int cgroup_bpf_run_filter(struct sock *sk,
> +					struct sk_buff *skb,
> +					enum bpf_attach_type type)
> +{
> +	if (cgroup_bpf_enabled)
> +		return __cgroup_bpf_run_filter(sk, skb, type);
> +
> +	return 0;
> +}
> +
> +#else
> +
> +struct cgroup_bpf {};
> +static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
> +static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
> +				      struct cgroup *parent) {}
> +
> +static inline int cgroup_bpf_run_filter(struct sock *sk,
> +					struct sk_buff *skb,
> +					enum bpf_attach_type type)
> +{
> +	return 0;
> +}
> +
> +#endif /* CONFIG_CGROUP_BPF */
> +
> +#endif /* _BPF_CGROUP_H */
> diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
> index 5b17de6..861b467 100644
> --- a/include/linux/cgroup-defs.h
> +++ b/include/linux/cgroup-defs.h
> @@ -16,6 +16,7 @@
>  #include <linux/percpu-refcount.h>
>  #include <linux/percpu-rwsem.h>
>  #include <linux/workqueue.h>
> +#include <linux/bpf-cgroup.h>
>  
>  #ifdef CONFIG_CGROUPS
>  
> @@ -300,6 +301,9 @@ struct cgroup {
>  	/* used to schedule release agent */
>  	struct work_struct release_agent_work;
>  
> +	/* used to store eBPF programs */
> +	struct cgroup_bpf bpf;
> +
>  	/* ids of the ancestors at each level including self */
>  	int ancestor_ids[];
>  };
> diff --git a/init/Kconfig b/init/Kconfig
> index cac3f09..5a89c83 100644
> --- a/init/Kconfig
> +++ b/init/Kconfig
> @@ -1144,6 +1144,18 @@ config CGROUP_PERF
>  
>  	  Say N if unsure.
>  
> +config CGROUP_BPF
> +	bool "Support for eBPF programs attached to cgroups"
> +	depends on BPF_SYSCALL && SOCK_CGROUP_DATA
> +	help
> +	  Allow attaching eBPF programs to a cgroup using the bpf(2)
> +	  syscall command BPF_PROG_ATTACH.
> +
> +	  In which context these programs are accessed depends on the type
> +	  of attachment. For instance, programs that are attached using
> +	  BPF_ATTACH_TYPE_CGROUP_INET_INGRESS will be executed on the
> +	  ingress path of inet sockets.
> +
>  config CGROUP_DEBUG
>  	bool "Example controller"
>  	default n
> diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
> index eed911d..b22256b 100644
> --- a/kernel/bpf/Makefile
> +++ b/kernel/bpf/Makefile
> @@ -5,3 +5,4 @@ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o
>  ifeq ($(CONFIG_PERF_EVENTS),y)
>  obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
>  endif
> +obj-$(CONFIG_CGROUP_BPF) += cgroup.o
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> new file mode 100644
> index 0000000..4d64923
> --- /dev/null
> +++ b/kernel/bpf/cgroup.c
> @@ -0,0 +1,165 @@
> +/*
> + * Functions to manage eBPF programs attached to cgroups
> + *
> + * Copyright (c) 2016 Daniel Mack
> + *
> + * This file is subject to the terms and conditions of version 2 of the GNU
> + * General Public License.  See the file COPYING in the main directory of the
> + * Linux distribution for more details.
> + */
> +
> +#include <linux/kernel.h>
> +#include <linux/atomic.h>
> +#include <linux/cgroup.h>
> +#include <linux/slab.h>
> +#include <linux/bpf.h>
> +#include <linux/bpf-cgroup.h>
> +#include <net/sock.h>
> +
> +DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
> +EXPORT_SYMBOL(cgroup_bpf_enabled_key);
> +
> +/**
> + * cgroup_bpf_put() - put references of all bpf programs
> + * @cgrp: the cgroup to modify
> + */
> +void cgroup_bpf_put(struct cgroup *cgrp)
> +{
> +	unsigned int type;
> +
> +	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
> +		struct bpf_prog *prog = cgrp->bpf.prog[type];
> +
> +		if (prog) {
> +			bpf_prog_put(prog);
> +			static_branch_dec(&cgroup_bpf_enabled_key);
> +		}
> +	}
> +}
> +
> +/**
> + * cgroup_bpf_inherit() - inherit effective programs from parent
> + * @cgrp: the cgroup to modify
> + * @parent: the parent to inherit from
> + */
> +void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
> +{
> +	unsigned int type;
> +
> +	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
> +		struct bpf_prog *e;
> +
> +		e = rcu_dereference_protected(parent->bpf.effective[type],
> +					      lockdep_is_held(&cgroup_mutex));
> +		rcu_assign_pointer(cgrp->bpf.effective[type], e);
> +	}
> +}
> +
> +/**
> + * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
> + *                         propagate the change to descendants
> + * @cgrp: The cgroup whose descendants to traverse
> + * @prog: A new program to pin
> + * @type: Type of pinning operation (ingress/egress)
> + *
> + * Each cgroup has two sets of bpf program pointers: one for the eBPF
> + * programs pinned to it directly, and one for those effective for it.
> + *
> + * If @prog is not %NULL, this function attaches a new program to the cgroup
> + * and releases the one that is currently attached, if any. @prog is then
> + * made the effective program of type @type in that cgroup.
> + *
> + * If @prog is %NULL, the currently attached program of type @type is
> + * released, and the effective program of the parent cgroup (if any) is
> + * inherited by @cgrp.
> + *
> + * Then, the descendants of @cgrp are walked and the effective program for
> + * each of them is set to the effective program of @cgrp unless the
> + * descendant has its own program attached, in which case the subbranch is
> + * skipped. This ensures that delegated subcgroups with own programs are left
> + * untouched.
> + *
> + * Must be called with cgroup_mutex held.
> + */
> +void __cgroup_bpf_update(struct cgroup *cgrp,
> +			 struct cgroup *parent,
> +			 struct bpf_prog *prog,
> +			 enum bpf_attach_type type)
> +{
> +	struct bpf_prog *old_prog, *effective;
> +	struct cgroup_subsys_state *pos;
> +
> +	old_prog = xchg(cgrp->bpf.prog + type, prog);
> +
> +	if (prog)
> +		static_branch_inc(&cgroup_bpf_enabled_key);
> +
> +	if (old_prog) {
> +		bpf_prog_put(old_prog);
> +		static_branch_dec(&cgroup_bpf_enabled_key);
> +	}
> +
> +	effective = (!prog && parent) ?
> +		rcu_dereference_protected(parent->bpf.effective[type],
> +					  lockdep_is_held(&cgroup_mutex)) :
> +		prog;
> +
> +	css_for_each_descendant_pre(pos, &cgrp->self) {
> +		struct cgroup *desc = container_of(pos, struct cgroup, self);
> +
> +		/* skip the subtree if the descendant has its own program */
> +		if (desc->bpf.prog[type] && desc != cgrp)
> +			pos = css_rightmost_descendant(pos);
> +		else
> +			rcu_assign_pointer(desc->bpf.effective[type],
> +					   effective);
> +	}
> +}
> +
> +/**
> + * __cgroup_bpf_run_filter() - Run a program for packet filtering
> + * @sk: The socket sending or receiving traffic
> + * @skb: The skb that is being sent or received
> + * @type: The type of program to be executed
> + *
> + * If no socket is passed, or the socket is not of type INET or INET6,
> + * this function does nothing and returns 0.
> + *
> + * The program type passed in via @type must be suitable for network
> + * filtering. No further check is performed to assert that.
> + *
> + * This function will return %-EPERM if an attached program was found and
> + * it returned != 1 during execution. In all other cases, 0 is returned.
> + */
> +int __cgroup_bpf_run_filter(struct sock *sk,
> +			    struct sk_buff *skb,
> +			    enum bpf_attach_type type)
> +{
> +	struct bpf_prog *prog;
> +	struct cgroup *cgrp;
> +	int ret = 0;
> +
> +	if (!sk)
> +		return 0;
> +
> +	if (sk->sk_family != AF_INET &&
> +	    sk->sk_family != AF_INET6)
> +		return 0;
> +
> +	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
> +
> +	rcu_read_lock();
> +
> +	prog = rcu_dereference(cgrp->bpf.effective[type]);
> +	if (prog) {
> +		unsigned int offset = skb->data - skb_mac_header(skb);
> +
> +		__skb_push(skb, offset);
> +		ret = bpf_prog_run_clear_cb(prog, skb) == 1 ? 0 : -EPERM;
> +		__skb_pull(skb, offset);
> +	}
> +
> +	rcu_read_unlock();
> +
> +	return ret;
> +}
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index d1c51b7..57ade89 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -5038,6 +5038,8 @@ static void css_release_work_fn(struct work_struct *work)
>  		if (cgrp->kn)
>  			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
>  					 NULL);
> +
> +		cgroup_bpf_put(cgrp);
>  	}
>  
>  	mutex_unlock(&cgroup_mutex);
> @@ -5245,6 +5247,9 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
>  	if (!cgroup_on_dfl(cgrp))
>  		cgrp->subtree_control = cgroup_control(cgrp);
>  
> +	if (parent)
> +		cgroup_bpf_inherit(cgrp, parent);
> +
>  	cgroup_propagate_control(cgrp);
>  
>  	/* @cgrp doesn't have dir yet so the following will only create csses */
> @@ -6417,6 +6422,19 @@ static __init int cgroup_namespaces_init(void)
>  }
>  subsys_initcall(cgroup_namespaces_init);
>  
> +#ifdef CONFIG_CGROUP_BPF
> +void cgroup_bpf_update(struct cgroup *cgrp,
> +		       struct bpf_prog *prog,
> +		       enum bpf_attach_type type)
> +{
> +	struct cgroup *parent = cgroup_parent(cgrp);
> +
> +	mutex_lock(&cgroup_mutex);
> +	__cgroup_bpf_update(cgrp, parent, prog, type);
> +	mutex_unlock(&cgroup_mutex);
> +}
> +#endif /* CONFIG_CGROUP_BPF */
> +
>  #ifdef CONFIG_CGROUP_DEBUG
>  static struct cgroup_subsys_state *
>  debug_css_alloc(struct cgroup_subsys_state *parent_css)
> -- 
> 2.5.5
>
Daniel Mack Sept. 5, 2016, 12:47 p.m. UTC | #4
Hi Alexei,

On 08/27/2016 02:03 AM, Alexei Starovoitov wrote:
> On Fri, Aug 26, 2016 at 09:58:48PM +0200, Daniel Mack wrote:
>> This patch adds two sets of eBPF program pointers to struct cgroup.
>> One for such that are directly pinned to a cgroup, and one for such
>> that are effective for it.
>>
>> To illustrate the logic behind that, assume the following example
>> cgroup hierarchy.
>>
>>   A - B - C
>>         \ D - E
>>
>> If only B has a program attached, it will be effective for B, C, D
>> and E. If D then attaches a program itself, that will be effective for
>> both D and E, and the program in B will only affect B and C. Only one
>> program of a given type is effective for a cgroup.
>>
>> Attaching and detaching programs will be done through the bpf(2)
>> syscall. For now, ingress and egress inet socket filtering are the
>> only supported use-cases.
>>
>> Signed-off-by: Daniel Mack <daniel@zonque.org>
> ...
>> +	css_for_each_descendant_pre(pos, &cgrp->self) {
>> +		struct cgroup *desc = container_of(pos, struct cgroup, self);
>> +
>> +		/* skip the subtree if the descendant has its own program */
>> +		if (desc->bpf.prog[type] && desc != cgrp)
> 
> is desc != cgrp really needed?
> I thought css_for_each_descendant_pre() shouldn't walk itself
> or I'm missing how it works.

Hmm, no - that check is necessary in my tests, and also according to the
documentation:

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
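
IOW, the walk starts at @cgrp itself, and that is exactly why the check
is needed: on attach, cgrp->bpf.prog[type] has just been set by the
xchg() above, so without the desc != cgrp test the very first iteration
would skip the whole subtree and never update any effective pointer.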


Daniel
Daniel Mack Sept. 5, 2016, 12:50 p.m. UTC | #5
On 08/30/2016 12:42 AM, Daniel Borkmann wrote:
> On 08/26/2016 09:58 PM, Daniel Mack wrote:
>> This patch adds two sets of eBPF program pointers to struct cgroup.
>> One for such that are directly pinned to a cgroup, and one for such
>> that are effective for it.
>>
>> To illustrate the logic behind that, assume the following example
>> cgroup hierarchy.
>>
>>    A - B - C
>>          \ D - E
>>
>> If only B has a program attached, it will be effective for B, C, D
>> and E. If D then attaches a program itself, that will be effective for
>> both D and E, and the program in B will only affect B and C. Only one
>> program of a given type is effective for a cgroup.
>>
>> Attaching and detaching programs will be done through the bpf(2)
>> syscall. For now, ingress and egress inet socket filtering are the
>> only supported use-cases.
>>
>> Signed-off-by: Daniel Mack <daniel@zonque.org>
> [...]
>> +void __cgroup_bpf_update(struct cgroup *cgrp,
>> +			 struct cgroup *parent,
>> +			 struct bpf_prog *prog,
>> +			 enum bpf_attach_type type)
>> +{
>> +	struct bpf_prog *old_prog, *effective;
>> +	struct cgroup_subsys_state *pos;
>> +
>> +	old_prog = xchg(cgrp->bpf.prog + type, prog);
>> +
>> +	if (prog)
>> +		static_branch_inc(&cgroup_bpf_enabled_key);
>> +
>> +	if (old_prog) {
>> +		bpf_prog_put(old_prog);
>> +		static_branch_dec(&cgroup_bpf_enabled_key);
>> +	}
>> +
>> +	effective = (!prog && parent) ?
>> +		rcu_dereference_protected(parent->bpf.effective[type],
>> +					  lockdep_is_held(&cgroup_mutex)) :
>> +		prog;
>> +
>> +	css_for_each_descendant_pre(pos, &cgrp->self) {
>> +		struct cgroup *desc = container_of(pos, struct cgroup, self);
>> +
>> +		/* skip the subtree if the descendant has its own program */
>> +		if (desc->bpf.prog[type] && desc != cgrp)
>> +			pos = css_rightmost_descendant(pos);
>> +		else
>> +			rcu_assign_pointer(desc->bpf.effective[type],
>> +					   effective);
>> +	}
> 
> Shouldn't the old_prog reference only be released right here at the end
> instead of above (otherwise this could race)?

Yes, that's right. Will change as well. Thanks for spotting!


Daniel
Daniel Mack Sept. 5, 2016, 2:49 p.m. UTC | #6
Hi,

On 08/30/2016 01:04 AM, Sargun Dhillon wrote:
> On Fri, Aug 26, 2016 at 09:58:48PM +0200, Daniel Mack wrote:
>> This patch adds two sets of eBPF program pointers to struct cgroup.
>> One for such that are directly pinned to a cgroup, and one for such
>> that are effective for it.
>>
>> To illustrate the logic behind that, assume the following example
>> cgroup hierarchy.
>>
>>   A - B - C
>>         \ D - E
>>
>> If only B has a program attached, it will be effective for B, C, D
>> and E. If D then attaches a program itself, that will be effective for
>> both D and E, and the program in B will only affect B and C. Only one
>> program of a given type is effective for a cgroup.
>>
> How does this work when running and orchestrator within an orchestrator? The 
> Docker in Docker / Mesos in Mesos use case, where the top level orchestrator is 
> observing the traffic, and there is an orchestrator within that also need to run 
> it.
> 
> In this case, I'd like to run E's filter, then if it returns 0, D's, and B's, 
> and so on.

Running multiple programs was an idea I had in one of my earlier drafts,
but after some discussion I dropped it again, because potentially
walking the cgroup hierarchy on every packet is just too expensive.

> Is it possible to allow this, either by flattening out the
> datastructure (copy a ref to the bpf programs to C and E) or
> something similar?

That would mean we carry a list of eBPF program pointers of dynamic
size. IOW, the deeper inside the cgroup hierarchy, the bigger the list,
so it can store a reference to all programs of all of its ancestors.

While I think that would be possible, even at some later point, I'd
really like to avoid it for the sake of simplicity.
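
(For the record, a flattened variant would look something like this;
hypothetical, not part of this series:

	struct cgroup_bpf_list {
		u32 cnt;			/* == nesting depth */
		struct bpf_prog *progs[];	/* self first, then ancestors */
	};

	struct cgroup_bpf {
		struct bpf_prog *prog[__MAX_BPF_ATTACH_TYPE];
		struct cgroup_bpf_list __rcu *effective[__MAX_BPF_ATTACH_TYPE];
	};

Every attach/detach would then have to reallocate and repopulate the
lists for the entire subtree.)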

Is there any reason why this can't be done in userspace? Compile a
program X for A, and overload it with Y, with Y doing the same as X
but adding some extra checks. Note that all users of the bpf(2) syscall
API will need CAP_NET_ADMIN anyway, so there is no delegation to
unprivileged sub-orchestrators or anything of that sort really.
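
E.g. (names made up):

	/* Y inlines X's policy and adds the inner layer's checks */
	int filter_Y(struct __sk_buff *skb)
	{
		if (!policy_X(skb))		/* logic taken from X */
			return 0;		/* drop */
		if (!extra_checks(skb))		/* added by the inner layer */
			return 0;
		return 1;			/* pass */
	}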


Thanks,
Daniel
Sargun Dhillon Sept. 5, 2016, 9:40 p.m. UTC | #7
On Mon, Sep 05, 2016 at 04:49:26PM +0200, Daniel Mack wrote:
> Hi,
> 
> On 08/30/2016 01:04 AM, Sargun Dhillon wrote:
> > On Fri, Aug 26, 2016 at 09:58:48PM +0200, Daniel Mack wrote:
> >> This patch adds two sets of eBPF program pointers to struct cgroup.
> >> One for such that are directly pinned to a cgroup, and one for such
> >> that are effective for it.
> >>
> >> To illustrate the logic behind that, assume the following example
> >> cgroup hierarchy.
> >>
> >>   A - B - C
> >>         \ D - E
> >>
> >> If only B has a program attached, it will be effective for B, C, D
> >> and E. If D then attaches a program itself, that will be effective for
> >> both D and E, and the program in B will only affect B and C. Only one
> >> program of a given type is effective for a cgroup.
> >>
> > How does this work when running and orchestrator within an orchestrator? The 
> > Docker in Docker / Mesos in Mesos use case, where the top level orchestrator is 
> > observing the traffic, and there is an orchestrator within that also need to run 
> > it.
> > 
> > In this case, I'd like to run E's filter, then if it returns 0, D's, and B's, 
> > and so on.
> 
> Running multiple programs was an idea I had in one of my earlier drafts,
> but after some discussion, I refrained from it again because potentially
> walking the cgroup hierarchy on every packet is just too expensive.
>
I think you're correct here. Maybe this is something I do with the LSM-attached
filters, and not for skb filters. Do you think there might be a way to make
this opt-in?

> > Is it possible to allow this, either by flattening out the
> > datastructure (copy a ref to the bpf programs to C and E) or
> > something similar?
> 
> That would mean we carry a list of eBPF program pointers of dynamic
> size. IOW, the deeper inside the cgroup hierarchy, the bigger the list,
> so it can store a reference to all programs of all of its ancestor.
> 
> While I think that would be possible, even at some later point, I'd
> really like to avoid it for the sake of simplicity.
> 
> Is there any reason why this can't be done in userspace? Compile a
> program X for A, and overload it with Y, with Y doing the same than X
> but add some extra checks? Note that all users of the bpf(2) syscall API
> will need CAP_NET_ADMIN anyway, so there is no delegation to
> unprivileged sub-orchestators or anything alike really.

One of the use cases that's becoming more and more common is
containers-in-containers. In this, you have a privileged container that's
running something like build orchestration, and you want to do macro-isolation
(say, limit access to only that tenant's infrastructure). Then, when the build
orchestrator runs a build, it may want to monitor and further isolate the tasks
that run in the build job. This is a side effect of composing different
container technologies. Typically you use one system for images, then another
for orchestration, and the actual program running inside of it can also
leverage containerization.

Example:
K8s->Docker->Jenkins Agent->Jenkins Build Job

There's also a differentiation of ownership in each of these systems. I would
really rather not require a middleware system that all my software has to talk
to, because sometimes I'm taking off-the-shelf software (Jenkins) and porting
it to containers. I think one of the pieces that's led to the success of
cgroups is the straightforward API and ease of use (and it's getting even
easier in v2).

It's perfectly fine to give the lower level tasks CAP_NET_ADMIN, because we use 
something like seccomp-bpf plus some of the work I've been doing with the LSM to 
prevent the sub-orchestrators from accidentally blowing away the system. 
Usually, we trust these orchestrators (internal users), so it's more of a 
precautionary measure as opposed to a true security measure.

Also, rewriting BPF programs, although pretty straightforward, sounds like a
pain to do in userspace, even with a helper. If we were to take people's
programs and chain them together via tail call, or similar, I can imagine that
rewriting a program might push you over the instruction limit.
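
E.g., chaining via a prog array could look like this (hypothetical; map
setup and loading elided):

	/* each stage runs its own checks and then tail-calls the next
	 * one; bpf_tail_call() does not return on success and falls
	 * through when the slot in the prog array is empty */
	int filter_stage(struct __sk_buff *skb)
	{
		if (!stage_checks(skb))
			return 0;			/* deny */
		bpf_tail_call(skb, &next_stage, 0);
		return 1;				/* last stage: accept */
	}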
> 
> 
> Thanks,
> Daniel
>
Alexei Starovoitov Sept. 5, 2016, 10:39 p.m. UTC | #8
On 9/5/16 2:40 PM, Sargun Dhillon wrote:
> On Mon, Sep 05, 2016 at 04:49:26PM +0200, Daniel Mack wrote:
>> Hi,
>>
>> On 08/30/2016 01:04 AM, Sargun Dhillon wrote:
>>> On Fri, Aug 26, 2016 at 09:58:48PM +0200, Daniel Mack wrote:
>>>> This patch adds two sets of eBPF program pointers to struct cgroup.
>>>> One for such that are directly pinned to a cgroup, and one for such
>>>> that are effective for it.
>>>>
>>>> To illustrate the logic behind that, assume the following example
>>>> cgroup hierarchy.
>>>>
>>>>    A - B - C
>>>>          \ D - E
>>>>
>>>> If only B has a program attached, it will be effective for B, C, D
>>>> and E. If D then attaches a program itself, that will be effective for
>>>> both D and E, and the program in B will only affect B and C. Only one
>>>> program of a given type is effective for a cgroup.
>>>>
>>> How does this work when running and orchestrator within an orchestrator? The
>>> Docker in Docker / Mesos in Mesos use case, where the top level orchestrator is
>>> observing the traffic, and there is an orchestrator within that also need to run
>>> it.
>>>
>>> In this case, I'd like to run E's filter, then if it returns 0, D's, and B's,
>>> and so on.
>>
>> Running multiple programs was an idea I had in one of my earlier drafts,
>> but after some discussion, I refrained from it again because potentially
>> walking the cgroup hierarchy on every packet is just too expensive.
>>
> I think you're correct here. Maybe this is something I do with the LSM-attached
> filters, and not for skb filters. Do you think there might be a way to opt-in to
> this option?
>
>>> Is it possible to allow this, either by flattening out the
>>> datastructure (copy a ref to the bpf programs to C and E) or
>>> something similar?
>>
>> That would mean we carry a list of eBPF program pointers of dynamic
>> size. IOW, the deeper inside the cgroup hierarchy, the bigger the list,
>> so it can store a reference to all programs of all of its ancestor.
>>
>> While I think that would be possible, even at some later point, I'd
>> really like to avoid it for the sake of simplicity.
>>
>> Is there any reason why this can't be done in userspace? Compile a
>> program X for A, and overload it with Y, with Y doing the same than X
>> but add some extra checks? Note that all users of the bpf(2) syscall API
>> will need CAP_NET_ADMIN anyway, so there is no delegation to
>> unprivileged sub-orchestators or anything alike really.
>
> One of the use-cases that's becoming more and more common are
> containers-in-containers. In this, you have a privileged container that's
> running something like build orchestration, and you want to do macro-isolation
> (say limit access to only that tennant's infrastructure). Then, when the build
> orchestrator runs a build, it may want to monitor, and further isolate the tasks
> that run in the build job. This is a side-effect of composing different
> container technologies. Typically you use one system for images, then another
> for orchestration, and the actual program running inside of it can also leverage
> containerization.
>
> Example:
> K8s->Docker->Jenkins Agent->Jenkins Build Job

frankly I don't buy this argument, since the above
and other 'examples' of container-in-container look
fake to me. There is a ton of work to be done for
such a scheme to be even remotely feasible. The
cgroup+bpf stuff would be the last thing on my list
to 'fix' for such deployments. I don't think we
should worry about it at present.

Patch

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 0000000..a5a25c1
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,70 @@ 
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/bpf.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+	/*
+	 * Store two sets of bpf_prog pointers, one for programs that are
+	 * pinned directly to this cgroup, and one for those that are effective
+	 * when this cgroup is accessed.
+	 */
+	struct bpf_prog *prog[__MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog *effective[__MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_put(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled */
+static inline int cgroup_bpf_run_filter(struct sock *sk,
+					struct sk_buff *skb,
+					enum bpf_attach_type type)
+{
+	if (cgroup_bpf_enabled)
+		return __cgroup_bpf_run_filter(sk, skb, type);
+
+	return 0;
+}
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+				      struct cgroup *parent) {}
+
+static inline int cgroup_bpf_run_filter(struct sock *sk,
+					struct sk_buff *skb,
+					enum bpf_attach_type type)
+{
+	return 0;
+}
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5b17de6..861b467 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@ 
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -300,6 +301,9 @@  struct cgroup {
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to store eBPF programs */
+	struct cgroup_bpf bpf;
+
 	/* ids of the ancestors at each level including self */
 	int ancestor_ids[];
 };
diff --git a/init/Kconfig b/init/Kconfig
index cac3f09..5a89c83 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1144,6 +1144,18 @@  config CGROUP_PERF
 
 	  Say N if unsure.
 
+config CGROUP_BPF
+	bool "Support for eBPF programs attached to cgroups"
+	depends on BPF_SYSCALL && SOCK_CGROUP_DATA
+	help
+	  Allow attaching eBPF programs to a cgroup using the bpf(2)
+	  syscall command BPF_PROG_ATTACH.
+
+	  In which context these programs are accessed depends on the type
+	  of attachment. For instance, programs that are attached using
+	  BPF_ATTACH_TYPE_CGROUP_INET_INGRESS will be executed on the
+	  ingress path of inet sockets.
+
 config CGROUP_DEBUG
 	bool "Example controller"
 	default n
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d..b22256b 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -5,3 +5,4 @@  obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
+obj-$(CONFIG_CGROUP_BPF) += cgroup.o
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
new file mode 100644
index 0000000..4d64923
--- /dev/null
+++ b/kernel/bpf/cgroup.c
@@ -0,0 +1,165 @@ 
+/*
+ * Functions to manage eBPF programs attached to cgroups
+ *
+ * Copyright (c) 2016 Daniel Mack
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup.h>
+#include <net/sock.h>
+
+DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+
+/**
+ * cgroup_bpf_put() - put references of all bpf programs
+ * @cgrp: the cgroup to modify
+ */
+void cgroup_bpf_put(struct cgroup *cgrp)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
+		struct bpf_prog *prog = cgrp->bpf.prog[type];
+
+		if (prog) {
+			bpf_prog_put(prog);
+			static_branch_dec(&cgroup_bpf_enabled_key);
+		}
+	}
+}
+
+/**
+ * cgroup_bpf_inherit() - inherit effective programs from parent
+ * @cgrp: the cgroup to modify
+ * @parent: the parent to inherit from
+ */
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
+		struct bpf_prog *e;
+
+		e = rcu_dereference_protected(parent->bpf.effective[type],
+					      lockdep_is_held(&cgroup_mutex));
+		rcu_assign_pointer(cgrp->bpf.effective[type], e);
+	}
+}
+
+/**
+ * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
+ *                         propagate the change to descendants
+ * @cgrp: The cgroup whose descendants to traverse
+ * @prog: A new program to pin
+ * @type: Type of pinning operation (ingress/egress)
+ *
+ * Each cgroup has two sets of bpf program pointers: one for the eBPF
+ * programs pinned to it directly, and one for those effective for it.
+ *
+ * If @prog is not %NULL, this function attaches a new program to the cgroup
+ * and releases the one that is currently attached, if any. @prog is then
+ * made the effective program of type @type in that cgroup.
+ *
+ * If @prog is %NULL, the currently attached program of type @type is
+ * released, and the effective program of the parent cgroup (if any) is
+ * inherited by @cgrp.
+ *
+ * Then, the descendants of @cgrp are walked and the effective program for
+ * each of them is set to the effective program of @cgrp unless the
+ * descendant has its own program attached, in which case the subbranch is
+ * skipped. This ensures that delegated subcgroups with own programs are left
+ * untouched.
+ *
+ * Must be called with cgroup_mutex held.
+ */
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type)
+{
+	struct bpf_prog *old_prog, *effective;
+	struct cgroup_subsys_state *pos;
+
+	old_prog = xchg(cgrp->bpf.prog + type, prog);
+
+	if (prog)
+		static_branch_inc(&cgroup_bpf_enabled_key);
+
+	if (old_prog) {
+		bpf_prog_put(old_prog);
+		static_branch_dec(&cgroup_bpf_enabled_key);
+	}
+
+	effective = (!prog && parent) ?
+		rcu_dereference_protected(parent->bpf.effective[type],
+					  lockdep_is_held(&cgroup_mutex)) :
+		prog;
+
+	css_for_each_descendant_pre(pos, &cgrp->self) {
+		struct cgroup *desc = container_of(pos, struct cgroup, self);
+
+		/* skip the subtree if the descendant has its own program */
+		if (desc->bpf.prog[type] && desc != cgrp)
+			pos = css_rightmost_descendant(pos);
+		else
+			rcu_assign_pointer(desc->bpf.effective[type],
+					   effective);
+	}
+}
+
+/**
+ * __cgroup_bpf_run_filter() - Run a program for packet filtering
+ * @sk: The socket sending or receiving traffic
+ * @skb: The skb that is being sent or received
+ * @type: The type of program to be executed
+ *
+ * If no socket is passed, or the socket is not of type INET or INET6,
+ * this function does nothing and returns 0.
+ *
+ * The program type passed in via @type must be suitable for network
+ * filtering. No further check is performed to assert that.
+ *
+ * This function will return %-EPERM if an attached program was found and it
+ * returned != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret = 0;
+
+	if (!sk)
+		return 0;
+
+	if (sk->sk_family != AF_INET &&
+	    sk->sk_family != AF_INET6)
+		return 0;
+
+	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(cgrp->bpf.effective[type]);
+	if (prog) {
+		unsigned int offset = skb->data - skb_mac_header(skb);
+
+		__skb_push(skb, offset);
+		ret = bpf_prog_run_clear_cb(prog, skb) == 1 ? 0 : -EPERM;
+		__skb_pull(skb, offset);
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d1c51b7..57ade89 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5038,6 +5038,8 @@  static void css_release_work_fn(struct work_struct *work)
 		if (cgrp->kn)
 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
 					 NULL);
+
+		cgroup_bpf_put(cgrp);
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -5245,6 +5247,9 @@  static struct cgroup *cgroup_create(struct cgroup *parent)
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
+	if (parent)
+		cgroup_bpf_inherit(cgrp, parent);
+
 	cgroup_propagate_control(cgrp);
 
 	/* @cgrp doesn't have dir yet so the following will only create csses */
@@ -6417,6 +6422,19 @@  static __init int cgroup_namespaces_init(void)
 }
 subsys_initcall(cgroup_namespaces_init);
 
+#ifdef CONFIG_CGROUP_BPF
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type)
+{
+	struct cgroup *parent = cgroup_parent(cgrp);
+
+	mutex_lock(&cgroup_mutex);
+	__cgroup_bpf_update(cgrp, parent, prog, type);
+	mutex_unlock(&cgroup_mutex);
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 #ifdef CONFIG_CGROUP_DEBUG
 static struct cgroup_subsys_state *
 debug_css_alloc(struct cgroup_subsys_state *parent_css)