[bpf-next,v2,2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector

Message ID: 20190124164953.29740-3-sdf@google.com
State: Changes Requested
Delegated to: BPF Maintainers
Series: support flow dissector in BPF_PROG_TEST_RUN

Commit Message

Stanislav Fomichev Jan. 24, 2019, 4:49 p.m. UTC
The input is packet data, the output is struct bpf_flow_keys. This should
make it easy to test flow dissector programs without an elaborate
setup.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 include/linux/bpf.h |  3 ++
 net/bpf/test_run.c  | 82 +++++++++++++++++++++++++++++++++++++++++++++
 net/core/filter.c   |  1 +
 3 files changed, 86 insertions(+)
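
For context, here is a minimal sketch of how a user-space caller could
exercise this new hook. It assumes libbpf's bpf_prog_test_run() wrapper and
a flow dissector program already loaded as prog_fd; the helper name
dissect_once and the packet buffer are illustrative, not part of this patch:

	#include <bpf/bpf.h>	/* libbpf's bpf_prog_test_run() wrapper */
	#include <linux/bpf.h>	/* UAPI struct bpf_flow_keys */

	/* Run a loaded flow dissector program once over a raw Ethernet
	 * frame and read the dissected keys back out of data_out.
	 */
	static int dissect_once(int prog_fd, void *pkt, __u32 pkt_len)
	{
		struct bpf_flow_keys keys = {};
		__u32 keys_size = sizeof(keys);
		__u32 retval, duration;
		int err;

		err = bpf_prog_test_run(prog_fd, 1 /* repeat */, pkt, pkt_len,
					&keys, &keys_size, &retval, &duration);
		if (err)
			return err;

		/* keys now holds the program's output, e.g. keys.nhoff,
		 * keys.thoff and keys.ip_proto.
		 */
		return retval;
	}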

Comments

Song Liu Jan. 26, 2019, 12:31 a.m. UTC | #1
On Thu, Jan 24, 2019 at 8:53 AM Stanislav Fomichev <sdf@google.com> wrote:
>
> The input is packet data, the output is struct bpf_flow_keys. This should
> make it easy to test flow dissector programs without an elaborate
> setup.
>
> [...]
>
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                    const union bpf_attr *kattr,
> +                                    union bpf_attr __user *uattr)

I think this function duplicates a lot of logic from bpf_prog_test_run_skb().
Can we somehow reuse bpf_prog_test_run_skb()?

Thanks,
Song


Stanislav Fomichev Jan. 26, 2019, 12:56 a.m. UTC | #2
On 01/25, Song Liu wrote:
> On Thu, Jan 24, 2019 at 8:53 AM Stanislav Fomichev <sdf@google.com> wrote:
> > [...]
>
> I think this function duplicates a lot of logic from bpf_prog_test_run_skb().
> Can we somehow reuse bpf_prog_test_run_skb()?
I did that initially
(https://marc.info/?l=linux-netdev&m=154830227529929&w=2), but Alexei
suggested that there is not much to reuse (and that sharing the path could
hurt test performance for the existing program types).
Song Liu Jan. 28, 2019, 5:24 p.m. UTC | #3
On Fri, Jan 25, 2019 at 4:56 PM Stanislav Fomichev <sdf@fomichev.me> wrote:
>
> On 01/25, Song Liu wrote:
> > On Thu, Jan 24, 2019 at 8:53 AM Stanislav Fomichev <sdf@google.com> wrote:
> > > [...]
> >
> > I think this function duplicates a lot of logic from bpf_prog_test_run_skb().
> > Can we somehow reuse bpf_prog_test_run_skb()?
> I did that initially
> (https://marc.info/?l=linux-netdev&m=154830227529929&w=2), but Alexei
> suggested that there is not much to reuse (and that sharing the path could
> hurt test performance for the existing program types).

I see.

Acked-by: Song Liu <songliubraving@fb.com>

Thanks!


Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e734f163bd0b..701ef954a258 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -397,6 +397,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr);
 
 /* an array of programs to be executed under rcu_lock.
  *
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..2c5172b33209 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -240,3 +240,85 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	kfree(data);
 	return ret;
 }
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr)
+{
+	u32 size = kattr->test.data_size_in;
+	u32 repeat = kattr->test.repeat;
+	struct bpf_flow_keys flow_keys;
+	u64 time_start, time_spent = 0;
+	struct bpf_skb_data_end *cb;
+	u32 retval, duration;
+	struct sk_buff *skb;
+	struct sock *sk;
+	void *data;
+	int ret;
+	u32 i;
+
+	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+		return -EINVAL;
+
+	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	sk = kzalloc(sizeof(*sk), GFP_USER);
+	if (!sk) {
+		kfree(data);
+		return -ENOMEM;
+	}
+	sock_net_set(sk, current->nsproxy->net_ns);
+	sock_init_data(NULL, sk);
+
+	skb = build_skb(data, 0);
+	if (!skb) {
+		kfree(data);
+		kfree(sk);
+		return -ENOMEM;
+	}
+	skb->sk = sk;
+
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+	__skb_put(skb, size);
+	skb->protocol = eth_type_trans(skb,
+				       current->nsproxy->net_ns->loopback_dev);
+	skb_reset_network_header(skb);
+
+	cb = (struct bpf_skb_data_end *)skb->cb;
+	cb->qdisc_cb.flow_keys = &flow_keys;
+
+	if (!repeat)
+		repeat = 1;
+
+	time_start = ktime_get_ns();
+	for (i = 0; i < repeat; i++) {
+		preempt_disable();
+		rcu_read_lock();
+		retval = __skb_flow_bpf_dissect(prog, skb,
+						&flow_keys_dissector,
+						&flow_keys);
+		rcu_read_unlock();
+		preempt_enable();
+
+		if (need_resched()) {
+			if (signal_pending(current))
+				break;
+			time_spent += ktime_get_ns() - time_start;
+			cond_resched();
+			time_start = ktime_get_ns();
+		}
+	}
+	time_spent += ktime_get_ns() - time_start;
+	do_div(time_spent, repeat);
+	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+
+	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+			      retval, duration);
+
+	kfree_skb(skb);
+	kfree(sk);
+	return ret;
+}
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b3b436ef545..ff4641dae2be 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7690,6 +7690,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
 };
 
 const struct bpf_prog_ops flow_dissector_prog_ops = {
+	.test_run		= bpf_prog_test_run_flow_dissector,
 };
 
 int sk_detach_filter(struct sock *sk)
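
For reference, registering .test_run in flow_dissector_prog_ops is the only
wiring the BPF_PROG_TEST_RUN command needs: the syscall path dispatches
through that op. Roughly, paraphrasing the kernel/bpf/syscall.c handler of
this era (capability checks and attr validation elided; this sketch is not
part of the patch):

	static int bpf_prog_test_run(const union bpf_attr *attr,
				     union bpf_attr __user *uattr)
	{
		struct bpf_prog *prog;
		int ret = -ENOTSUPP;

		/* ... CAP_SYS_ADMIN and attr validation elided ... */
		prog = bpf_prog_get(attr->test.prog_fd);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		/* Program types without a .test_run op keep returning
		 * -ENOTSUPP; flow dissector now takes this branch. */
		if (prog->aux->ops->test_run)
			ret = prog->aux->ops->test_run(prog, attr, uattr);

		bpf_prog_put(prog);
		return ret;
	}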