
[bpf-next,2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector

Message ID: 20190122212315.137291-3-sdf@google.com
State: Changes Requested
Delegated to: BPF Maintainers
Series: support flow dissector in BPF_PROG_TEST_RUN

Commit Message

Stanislav Fomichev Jan. 22, 2019, 9:23 p.m. UTC
The input is packet data, the output is struct bpf_flow_keys. This should
make it easy to test flow dissector programs without elaborate
setup.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 include/linux/bpf.h |  3 ++
 net/bpf/test_run.c  | 75 ++++++++++++++++++++++++++++++++++++++++++++-
 net/core/filter.c   |  1 +
 3 files changed, 78 insertions(+), 1 deletion(-)
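
For illustration, a minimal user-space invocation could look like the sketch
below. It is not part of this patch: it assumes a libbpf of this era (with the
bpf_prog_test_run() wrapper), a prog_fd for an already loaded
BPF_PROG_TYPE_FLOW_DISSECTOR program, and a raw packet starting at the
Ethernet header.

/* Sketch only: push one packet through BPF_PROG_TEST_RUN and read the
 * dissected struct bpf_flow_keys back out of data_out.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int dissect_packet(int prog_fd, void *pkt, __u32 pkt_len)
{
	struct bpf_flow_keys flow_keys = {};
	__u32 keys_size = sizeof(flow_keys);
	__u32 retval = 0, duration = 0;
	int err;

	err = bpf_prog_test_run(prog_fd, 1 /* repeat */, pkt, pkt_len,
				&flow_keys, &keys_size, &retval, &duration);
	if (err)
		return err;

	/* flow_keys now holds nhoff/thoff, protocol, addresses, ports. */
	return retval;
}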

Comments

Alexei Starovoitov Jan. 24, 2019, 3:57 a.m. UTC | #1
On Tue, Jan 22, 2019 at 01:23:14PM -0800, Stanislav Fomichev wrote:
> The input is packet data, the output is struct bpf_flow_keys. This should
> make it easy to test flow dissector programs without elaborate
> setup.
> 
> Signed-off-by: Stanislav Fomichev <sdf@google.com>
> ---
>  include/linux/bpf.h |  3 ++
>  net/bpf/test_run.c  | 75 ++++++++++++++++++++++++++++++++++++++++++++-
>  net/core/filter.c   |  1 +
>  3 files changed, 78 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index e734f163bd0b..701ef954a258 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -397,6 +397,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>  			  union bpf_attr __user *uattr);
>  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>  			  union bpf_attr __user *uattr);
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +				     const union bpf_attr *kattr,
> +				     union bpf_attr __user *uattr);
>  
>  /* an array of programs to be executed under rcu_lock.
>   *
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index fa2644d276ef..ecad72885f23 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -16,12 +16,26 @@
>  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
>  		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
>  {
> +	struct bpf_skb_data_end *cb;
> +	struct sk_buff *skb;
>  	u32 ret;
>  
>  	preempt_disable();
>  	rcu_read_lock();
>  	bpf_cgroup_storage_set(storage);
> -	ret = BPF_PROG_RUN(prog, ctx);
> +
> +	switch (prog->type) {
> +	case BPF_PROG_TYPE_FLOW_DISSECTOR:
> +		skb = (struct sk_buff *)ctx;
> +		cb = (struct bpf_skb_data_end *)skb->cb;
> +		ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> +					     cb->qdisc_cb.flow_keys);
> +		break;
> +	default:
> +		ret = BPF_PROG_RUN(prog, ctx);
> +		break;
> +	}

What is the benefit of this minimal code reuse?
It seems to me bpf_test_run_one() gets slower for all,
since prog type needs to be checked before every prog run.
The point of bpf_prog_ops->test_run was to avoid this overhead.
Are you somehow expecting flow_dissector prog using cgroup local storage?
I don't think that's possible.
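
The dispatch being referred to happens once per BPF_PROG_TEST_RUN syscall.
Roughly, kernel/bpf/syscall.c of this era does something like the following
(a paraphrased sketch, not verbatim kernel code), which keeps the per-type
decision out of the per-iteration run loop:

/* Paraphrased sketch: the program type is resolved once here through the
 * per-type bpf_prog_ops->test_run callback, not on every repetition.
 */
static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}
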
Stanislav Fomichev Jan. 24, 2019, 4:34 p.m. UTC | #2
On 01/23, Alexei Starovoitov wrote:
> On Tue, Jan 22, 2019 at 01:23:14PM -0800, Stanislav Fomichev wrote:
> > The input is packet data, the output is struct bpf_flow_keys. This should
> > make it easy to test flow dissector programs without elaborate
> > setup.
> > 
> > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > ---
> >  include/linux/bpf.h |  3 ++
> >  net/bpf/test_run.c  | 75 ++++++++++++++++++++++++++++++++++++++++++++-
> >  net/core/filter.c   |  1 +
> >  3 files changed, 78 insertions(+), 1 deletion(-)
> > 
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index e734f163bd0b..701ef954a258 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -397,6 +397,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> >  			  union bpf_attr __user *uattr);
> >  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> >  			  union bpf_attr __user *uattr);
> > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > +				     const union bpf_attr *kattr,
> > +				     union bpf_attr __user *uattr);
> >  
> >  /* an array of programs to be executed under rcu_lock.
> >   *
> > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > index fa2644d276ef..ecad72885f23 100644
> > --- a/net/bpf/test_run.c
> > +++ b/net/bpf/test_run.c
> > @@ -16,12 +16,26 @@
> >  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> >  		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> >  {
> > +	struct bpf_skb_data_end *cb;
> > +	struct sk_buff *skb;
> >  	u32 ret;
> >  
> >  	preempt_disable();
> >  	rcu_read_lock();
> >  	bpf_cgroup_storage_set(storage);
> > -	ret = BPF_PROG_RUN(prog, ctx);
> > +
> > +	switch (prog->type) {
> > +	case BPF_PROG_TYPE_FLOW_DISSECTOR:
> > +		skb = (struct sk_buff *)ctx;
> > +		cb = (struct bpf_skb_data_end *)skb->cb;
> > +		ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> > +					     cb->qdisc_cb.flow_keys);
> > +		break;
> > +	default:
> > +		ret = BPF_PROG_RUN(prog, ctx);
> > +		break;
> > +	}
> 
> What is the benefit of this minimal code reuse?
> It seems to me bpf_test_run_one() gets slower for all,
> since prog type needs to be checked before every prog run.
> The point of bpf_prog_ops->test_run was to avoid this overhead.
> Are you somehow expecting flow_dissector prog using cgroup local storage?
> I don't think that's possible.
Agreed. Initially I didn't want to re-implement the logic of
bpf_test_run. But now that you mention that it's mostly about cgroup local
storage, I think I can do a simple loop inside of
bpf_prog_test_run_flow_dissector. Thanks, will follow up with another
series!
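
For context, that direction would give the flow dissector its own run loop,
roughly along the lines of the sketch below (an illustration of the idea, not
the follow-up patch itself; the helper name is made up):

/* Sketch: a dedicated run loop for flow dissector test runs, with no
 * prog->type check per iteration and no cgroup storage setup, since
 * flow dissector programs cannot use cgroup local storage.
 */
static u32 flow_dissector_run_loop(struct bpf_prog *prog, struct sk_buff *skb,
				   struct bpf_flow_keys *flow_keys,
				   u32 repeat, u32 *duration)
{
	u64 time_start, time_spent;
	u32 ret = 0;
	u32 i;

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		preempt_disable();
		rcu_read_lock();
		ret = __skb_flow_bpf_dissect(prog, skb, &flow_keys_dissector,
					     flow_keys);
		rcu_read_unlock();
		preempt_enable();
	}
	time_spent = ktime_get_ns() - time_start;
	*duration = (u32)(time_spent / repeat);

	return ret;
}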

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e734f163bd0b..701ef954a258 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -397,6 +397,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr);
 
 /* an array of programs to be executed under rcu_lock.
  *
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..ecad72885f23 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -16,12 +16,26 @@
 static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
 		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
 {
+	struct bpf_skb_data_end *cb;
+	struct sk_buff *skb;
 	u32 ret;
 
 	preempt_disable();
 	rcu_read_lock();
 	bpf_cgroup_storage_set(storage);
-	ret = BPF_PROG_RUN(prog, ctx);
+
+	switch (prog->type) {
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+		skb = (struct sk_buff *)ctx;
+		cb = (struct bpf_skb_data_end *)skb->cb;
+		ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
+					     cb->qdisc_cb.flow_keys);
+		break;
+	default:
+		ret = BPF_PROG_RUN(prog, ctx);
+		break;
+	}
+
 	rcu_read_unlock();
 	preempt_enable();
 
@@ -240,3 +254,62 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	kfree(data);
 	return ret;
 }
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr)
+{
+	u32 size = kattr->test.data_size_in;
+	u32 repeat = kattr->test.repeat;
+	struct bpf_flow_keys flow_keys;
+	struct bpf_skb_data_end *cb;
+	u32 retval, duration;
+	struct sk_buff *skb;
+	struct sock *sk;
+	void *data;
+	int ret;
+
+	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+		return -EINVAL;
+
+	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	sk = kzalloc(sizeof(*sk), GFP_USER);
+	if (!sk) {
+		kfree(data);
+		return -ENOMEM;
+	}
+	sock_net_set(sk, current->nsproxy->net_ns);
+	sock_init_data(NULL, sk);
+
+	skb = build_skb(data, 0);
+	if (!skb) {
+		kfree(data);
+		kfree(sk);
+		return -ENOMEM;
+	}
+	skb->sk = sk;
+
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+	__skb_put(skb, size);
+	skb->protocol = eth_type_trans(skb,
+				       current->nsproxy->net_ns->loopback_dev);
+	skb_reset_network_header(skb);
+
+	cb = (struct bpf_skb_data_end *)skb->cb;
+	cb->qdisc_cb.flow_keys = &flow_keys;
+	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+	if (ret)
+		goto out;
+
+	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+			      retval, duration);
+
+out:
+	kfree_skb(skb);
+	kfree(sk);
+	return ret;
+}
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b3b436ef545..ff4641dae2be 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7690,6 +7690,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
 };
 
 const struct bpf_prog_ops flow_dissector_prog_ops = {
+	.test_run		= bpf_prog_test_run_flow_dissector,
 };
 
 int sk_detach_filter(struct sock *sk)