[bpf-next,2/4] bpf: Split bpf_sk_lookup

Message ID 497a6a47655e647ef51d127b4d5ca2cea0d1d268.1541695683.git.rdna@fb.com
State: Changes Requested, archived
Delegated to: BPF Maintainers
Series: bpf: Support socket lookup in CGROUP_SOCK_ADDR progs

Commit Message

Andrey Ignatov Nov. 8, 2018, 4:54 p.m. UTC
Split bpf_sk_lookup to separate the core functionality, which can be
reused to make socket lookup available to more program types, from the
functionality specific to program types that have access to an skb.

The core functionality is placed in __bpf_sk_lookup, and bpf_sk_lookup
only gets the caller netns and ifindex from the skb and passes them to
__bpf_sk_lookup.

Program types that don't have access to an skb can simply pass NULL to
__bpf_sk_lookup; a NULL skb is handled correctly by both inet{,6}_sdif
and the lookup functions.

This is a refactoring that simply moves blocks around and does NOT
change existing logic.

Signed-off-by: Andrey Ignatov <rdna@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
---
 net/core/filter.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
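
To illustrate what the split enables, below is a minimal sketch (not part
of this patch) of how a program type without an skb could call
__bpf_sk_lookup, using the signature introduced here. The helper name
bpf_sock_addr_sk_lookup_tcp and the assumption that the sock_addr context
exposes the socket as ctx->sk are made up for the example; the point is
simply that such a caller passes a NULL skb and ifindex 0 and derives
caller_net from its own socket:

BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	/* No skb in this program type: take caller_net from the context
	 * socket and pass a NULL skb and ifindex 0, which inet{,6}_sdif
	 * and the lookup functions handle correctly per the commit message.
	 */
	struct net *caller_net = sock_net(ctx->sk);

	return __bpf_sk_lookup(NULL, tuple, len, IPPROTO_TCP, netns_id,
			       caller_net, 0, flags);
}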

Comments

Martin KaFai Lau Nov. 9, 2018, 5:19 p.m. UTC | #1
On Thu, Nov 08, 2018 at 08:54:23AM -0800, Andrey Ignatov wrote:
> Split bpf_sk_lookup to separate the core functionality, which can be
> reused to make socket lookup available to more program types, from the
> functionality specific to program types that have access to an skb.
> 
> The core functionality is placed in __bpf_sk_lookup, and bpf_sk_lookup
> only gets the caller netns and ifindex from the skb and passes them to
> __bpf_sk_lookup.
> 
> Program types that don't have access to an skb can simply pass NULL to
> __bpf_sk_lookup; a NULL skb is handled correctly by both inet{,6}_sdif
> and the lookup functions.
> 
> This is a refactoring that simply moves blocks around and does NOT
> change existing logic.
> 
> Signed-off-by: Andrey Ignatov <rdna@fb.com>
> Acked-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  net/core/filter.c | 38 +++++++++++++++++++++++---------------
>  1 file changed, 23 insertions(+), 15 deletions(-)
> 
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 9a1327eb25fa..dc0f86a707b7 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -4825,14 +4825,10 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
>  
>  #ifdef CONFIG_INET
>  static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
> -			      struct sk_buff *skb, u8 family, u8 proto)
> +			      struct sk_buff *skb, u8 family, u8 proto, int dif)
>  {
>  	bool refcounted = false;
>  	struct sock *sk = NULL;
> -	int dif = 0;
> -
> -	if (skb->dev)
> -		dif = skb->dev->ifindex;
>  
>  	if (family == AF_INET) {
>  		__be32 src4 = tuple->ipv4.saddr;
> @@ -4875,16 +4871,16 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
>  	return sk;
>  }
>  
> -/* bpf_sk_lookup performs the core lookup for different types of sockets,
> +/* __bpf_sk_lookup performs the core lookup for different types of sockets,
>   * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
>   * Returns the socket as an 'unsigned long' to simplify the casting in the
>   * callers to satisfy BPF_CALL declarations.
>   */
>  static unsigned long
> -bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
> -	      u8 proto, u64 netns_id, u64 flags)
> +__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
> +		u8 proto, u64 netns_id, struct net *caller_net, int ifindex,
> +		u64 flags)
That looks a bit different from the one that landed in bpf-next.
You may need to respin the set.

Andrey Ignatov Nov. 9, 2018, 5:30 p.m. UTC | #2
Martin Lau <kafai@fb.com> [Fri, 2018-11-09 09:19 -0800]:
> On Thu, Nov 08, 2018 at 08:54:23AM -0800, Andrey Ignatov wrote:
> > Split bpf_sk_lookup to separate the core functionality, which can be
> > reused to make socket lookup available to more program types, from the
> > functionality specific to program types that have access to an skb.
> > 
> > The core functionality is placed in __bpf_sk_lookup, and bpf_sk_lookup
> > only gets the caller netns and ifindex from the skb and passes them to
> > __bpf_sk_lookup.
> > 
> > Program types that don't have access to an skb can simply pass NULL to
> > __bpf_sk_lookup; a NULL skb is handled correctly by both inet{,6}_sdif
> > and the lookup functions.
> > 
> > This is a refactoring that simply moves blocks around and does NOT
> > change existing logic.
> > 
> > Signed-off-by: Andrey Ignatov <rdna@fb.com>
> > Acked-by: Alexei Starovoitov <ast@kernel.org>
> > ---
> >  net/core/filter.c | 38 +++++++++++++++++++++++---------------
> >  1 file changed, 23 insertions(+), 15 deletions(-)
> > 
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index 9a1327eb25fa..dc0f86a707b7 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -4825,14 +4825,10 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
> >  
> >  #ifdef CONFIG_INET
> >  static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
> > -			      struct sk_buff *skb, u8 family, u8 proto)
> > +			      struct sk_buff *skb, u8 family, u8 proto, int dif)
> >  {
> >  	bool refcounted = false;
> >  	struct sock *sk = NULL;
> > -	int dif = 0;
> > -
> > -	if (skb->dev)
> > -		dif = skb->dev->ifindex;
> >  
> >  	if (family == AF_INET) {
> >  		__be32 src4 = tuple->ipv4.saddr;
> > @@ -4875,16 +4871,16 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
> >  	return sk;
> >  }
> >  
> > -/* bpf_sk_lookup performs the core lookup for different types of sockets,
> > +/* __bpf_sk_lookup performs the core lookup for different types of sockets,
> >   * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
> >   * Returns the socket as an 'unsigned long' to simplify the casting in the
> >   * callers to satisfy BPF_CALL declarations.
> >   */
> >  static unsigned long
> > -bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
> > -	      u8 proto, u64 netns_id, u64 flags)
> > +__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
> > +		u8 proto, u64 netns_id, struct net *caller_net, int ifindex,
> > +		u64 flags)
> That looks a bit different from the one that landed in bpf-next.
> You may need to respin the set.

Since Nitin's version has landed, I'll rebase on top of it and this
patch just won't be needed (initially I did it to unblock myself).

I'll also address the nit in patch 3 and send v2 with both changes.

Thanks Martin!


Patch

diff --git a/net/core/filter.c b/net/core/filter.c
index 9a1327eb25fa..dc0f86a707b7 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4825,14 +4825,10 @@  static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
 
 #ifdef CONFIG_INET
 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
-			      struct sk_buff *skb, u8 family, u8 proto)
+			      struct sk_buff *skb, u8 family, u8 proto, int dif)
 {
 	bool refcounted = false;
 	struct sock *sk = NULL;
-	int dif = 0;
-
-	if (skb->dev)
-		dif = skb->dev->ifindex;
 
 	if (family == AF_INET) {
 		__be32 src4 = tuple->ipv4.saddr;
@@ -4875,16 +4871,16 @@  static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
 	return sk;
 }
 
-/* bpf_sk_lookup performs the core lookup for different types of sockets,
+/* __bpf_sk_lookup performs the core lookup for different types of sockets,
  * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
  * Returns the socket as an 'unsigned long' to simplify the casting in the
  * callers to satisfy BPF_CALL declarations.
  */
 static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
-	      u8 proto, u64 netns_id, u64 flags)
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+		u8 proto, u64 netns_id, struct net *caller_net, int ifindex,
+		u64 flags)
 {
-	struct net *caller_net;
 	struct sock *sk = NULL;
 	u8 family = AF_UNSPEC;
 	struct net *net;
@@ -4893,19 +4889,15 @@  bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
 		goto out;
 
-	if (skb->dev)
-		caller_net = dev_net(skb->dev);
-	else
-		caller_net = sock_net(skb->sk);
 	if (netns_id) {
 		net = get_net_ns_by_id(caller_net, netns_id);
 		if (unlikely(!net))
 			goto out;
-		sk = sk_lookup(net, tuple, skb, family, proto);
+		sk = sk_lookup(net, tuple, skb, family, proto, ifindex);
 		put_net(net);
 	} else {
 		net = caller_net;
-		sk = sk_lookup(net, tuple, skb, family, proto);
+		sk = sk_lookup(net, tuple, skb, family, proto, ifindex);
 	}
 
 	if (sk)
@@ -4914,6 +4906,22 @@  bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	return (unsigned long) sk;
 }
 
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+	      u8 proto, u64 netns_id, u64 flags)
+{
+	struct net *caller_net = sock_net(skb->sk);
+	int ifindex = 0;
+
+	if (skb->dev) {
+		caller_net = dev_net(skb->dev);
+		ifindex = skb->dev->ifindex;
+	}
+
+	return __bpf_sk_lookup(skb, tuple, len, proto, netns_id, caller_net,
+			       ifindex, flags);
+}
+
 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
 	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
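
For reference, a minimal sketch of how the skb-based helper is used from a
BPF program; the behaviour this refactoring preserves is the tuple lookup
plus the reference that must be released. The bpf_sk_lookup_tcp and
bpf_sk_release declarations and the byte-order macros are assumed to come
from the selftests headers of this era (bpf_helpers.h, bpf_endian.h);
netns_id 0 selects the caller's netns and flags must be 0, matching the
checks in the code above.

/* Minimal tc/BPF sketch: look up a TCP socket for a fixed IPv4 tuple and
 * release the reference taken by the helper. Header names and helper
 * declarations are assumed from tools/testing/selftests/bpf.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("classifier")
int sk_lookup_example(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	tuple.ipv4.saddr = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
	tuple.ipv4.daddr = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
	tuple.ipv4.sport = bpf_htons(12345);
	tuple.ipv4.dport = bpf_htons(80);

	/* netns_id 0 = caller's netns; flags must currently be 0 */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (sk)
		bpf_sk_release(sk);	/* drop the reference from the lookup */

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";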