Message ID | 20190920044905.31759-2-steffen.klassert@secunet.com |
---|---|
State | RFC |
Delegated to: | David Miller |
Headers | show |
Series | Support fraglist GRO/GSO | expand |
On Fri, Sep 20, 2019 at 12:49 AM Steffen Klassert <steffen.klassert@secunet.com> wrote: > > This patch enables UDP GRO regardless if a GRO capable > socket is present. With this GRO is done by default > for the local input and forwarding path. > > Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com> > struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, > diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c > index a3908e55ed89..929b12fc7bc5 100644 > --- a/net/ipv4/udp_offload.c > +++ b/net/ipv4/udp_offload.c > @@ -401,36 +401,25 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, > return NULL; > } > > -INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, > - __be16 sport, __be16 dport)); > struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, > - struct udphdr *uh, udp_lookup_t lookup) > + struct udphdr *uh, struct sock *sk) > { > struct sk_buff *pp = NULL; > struct sk_buff *p; > struct udphdr *uh2; > unsigned int off = skb_gro_offset(skb); > int flush = 1; > - struct sock *sk; > > - rcu_read_lock(); > - sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, > - udp4_lib_lookup_skb, skb, uh->source, uh->dest); > - if (!sk) > - goto out_unlock; > - > - if (udp_sk(sk)->gro_enabled) { > + if (!sk || !udp_sk(sk)->gro_receive) { Not critical, but the use of sk->gro_enabled and sk->gro_receive to signal whether sockets are willing to accept large packets or are udp tunnels, respectively, is subtle and possibly confusing. Wrappers udp_sock_is_tunnel and udp_sock_accepts_gso could perhaps help document the logic a bit. 
static inline bool udp_sock_is_tunnel(struct udp_sock *up) { return up->gro_receive; } And perhaps only pass a non-zero sk to udp_gro_receive if it is a tunnel and thus skips the new default path: static inline struct sock *sk = udp4_lookup_tunnel(const struct sk_buff *skb, __be16 sport, __be16_dport) { struct sock *sk; if (!static_branch_unlikely(&udp_encap_needed_key)) return NULL; rcu_read_lock(); sk = udp4_lib_lookup_skb(skb, source, dest); rcu_read_unlock(); return udp_sock_is_tunnel(udp_sk(sk)) ? sk : NULL; } > pp = call_gro_receive(udp_gro_receive_segment, head, skb); > - rcu_read_unlock(); > return pp; > } Just a suggestion. It may be too verbose as given. > @@ -468,8 +456,10 @@ INDIRECT_CALLABLE_SCOPE > struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) > { > struct udphdr *uh = udp_gro_udphdr(skb); > + struct sk_buff *pp; > + struct sock *sk; > > - if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key)) > + if (unlikely(!uh)) > goto flush; > > /* Don't bother verifying checksum if we're going to flush anyway. */ > @@ -484,7 +474,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) > inet_gro_compute_pseudo); > skip: > NAPI_GRO_CB(skb)->is_ipv6 = 0; > - return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb); > + rcu_read_lock(); > + sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL; > + pp = udp_gro_receive(head, skb, uh, sk); > + rcu_read_unlock(); > + return pp;
On Mon, Sep 23, 2019 at 8:53 AM Willem de Bruijn <willemdebruijn.kernel@gmail.com> wrote: > > On Fri, Sep 20, 2019 at 12:49 AM Steffen Klassert > <steffen.klassert@secunet.com> wrote: > > > > This patch enables UDP GRO regardless if a GRO capable > > socket is present. With this GRO is done by default > > for the local input and forwarding path. > > > > Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com> > > struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, > > diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c > > index a3908e55ed89..929b12fc7bc5 100644 > > --- a/net/ipv4/udp_offload.c > > +++ b/net/ipv4/udp_offload.c > > @@ -401,36 +401,25 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, > > return NULL; > > } > > > > -INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, > > - __be16 sport, __be16 dport)); > > struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, > > - struct udphdr *uh, udp_lookup_t lookup) > > + struct udphdr *uh, struct sock *sk) > > { > > struct sk_buff *pp = NULL; > > struct sk_buff *p; > > struct udphdr *uh2; > > unsigned int off = skb_gro_offset(skb); > > int flush = 1; > > - struct sock *sk; > > > > - rcu_read_lock(); > > - sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, > > - udp4_lib_lookup_skb, skb, uh->source, uh->dest); > > - if (!sk) > > - goto out_unlock; > > - > > - if (udp_sk(sk)->gro_enabled) { > > + if (!sk || !udp_sk(sk)->gro_receive) { > > Not critical, but the use of sk->gro_enabled and sk->gro_receive to > signal whether sockets are willing to accept large packets or are udp > tunnels, respectively, is subtle and possibly confusing. > > Wrappers udp_sock_is_tunnel and udp_sock_accepts_gso could perhaps > help document the logic a bit. 
> > static inline bool udp_sock_is_tunnel(struct udp_sock *up) > { > return up->gro_receive; > } > > And perhaps only pass a non-zero sk to udp_gro_receive if it is a > tunnel and thus skips the new default path: > > static inline struct sock *sk = udp4_lookup_tunnel(const struct > sk_buff *skb, __be16 sport, __be16_dport) > { > struct sock *sk; > > if (!static_branch_unlikely(&udp_encap_needed_key)) > return NULL; > > rcu_read_lock(); > sk = udp4_lib_lookup_skb(skb, source, dest); > rcu_read_unlock(); > > return udp_sock_is_tunnel(udp_sk(sk)) ? sk : NULL; > } > > > pp = call_gro_receive(udp_gro_receive_segment, head, skb); > > - rcu_read_unlock(); > > return pp; > > } > > Just a suggestion. It may be too verbose as given. .. and buggy, as it does not even test for NULL sk ;)
diff --git a/include/net/udp.h b/include/net/udp.h index bad74f780831..44e0e52b585c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -167,7 +167,7 @@ typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, __be16 dport); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, - struct udphdr *uh, udp_lookup_t lookup); + struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index a3908e55ed89..929b12fc7bc5 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -401,36 +401,25 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, return NULL; } -INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, - __be16 sport, __be16 dport)); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, - struct udphdr *uh, udp_lookup_t lookup) + struct udphdr *uh, struct sock *sk) { struct sk_buff *pp = NULL; struct sk_buff *p; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; - struct sock *sk; - rcu_read_lock(); - sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, - udp4_lib_lookup_skb, skb, uh->source, uh->dest); - if (!sk) - goto out_unlock; - - if (udp_sk(sk)->gro_enabled) { + if (!sk || !udp_sk(sk)->gro_receive) { pp = call_gro_receive(udp_gro_receive_segment, head, skb); - rcu_read_unlock(); return pp; } if (NAPI_GRO_CB(skb)->encap_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && - !NAPI_GRO_CB(skb)->csum_valid) || - !udp_sk(sk)->gro_receive) - goto out_unlock; + !NAPI_GRO_CB(skb)->csum_valid)) + goto out; /* mark that this skb passed once through the tunnel gro layer */ NAPI_GRO_CB(skb)->encap_mark = 1; @@ -457,8 +446,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, 
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb); -out_unlock: - rcu_read_unlock(); +out: skb_gro_flush_final(skb, pp, flush); return pp; } @@ -468,8 +456,10 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); + struct sk_buff *pp; + struct sock *sk; - if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key)) + if (unlikely(!uh)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. */ @@ -484,7 +474,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) inet_gro_compute_pseudo); skip: NAPI_GRO_CB(skb)->is_ipv6 = 0; - return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb); + rcu_read_lock(); + sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL; + pp = udp_gro_receive(head, skb, uh, sk); + rcu_read_unlock(); + return pp; flush: NAPI_GRO_CB(skb)->flush = 1; @@ -517,9 +511,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, rcu_read_lock(); sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, udp4_lib_lookup_skb, skb, uh->source, uh->dest); - if (sk && udp_sk(sk)->gro_enabled) { - err = udp_gro_complete_segment(skb); - } else if (sk && udp_sk(sk)->gro_complete) { + if (sk && udp_sk(sk)->gro_complete) { skb_shinfo(skb)->gso_type = uh->check ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; @@ -529,6 +521,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, skb->encapsulation = 1; err = udp_sk(sk)->gro_complete(sk, skb, nhoff + sizeof(struct udphdr)); + } else { + err = udp_gro_complete_segment(skb); } rcu_read_unlock(); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 64b8f05d6735..435cfbadb6bd 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -115,8 +115,10 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); + struct sk_buff *pp; + struct sock *sk; - if (unlikely(!uh) || !static_branch_unlikely(&udpv6_encap_needed_key)) + if (unlikely(!uh)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. */ @@ -132,7 +134,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) skip: NAPI_GRO_CB(skb)->is_ipv6 = 1; - return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb); + rcu_read_lock(); + sk = static_branch_unlikely(&udpv6_encap_needed_key) ? udp6_lib_lookup_skb(skb, uh->source, uh->dest) : NULL; + pp = udp_gro_receive(head, skb, uh, sk); + rcu_read_unlock(); + return pp; flush: NAPI_GRO_CB(skb)->flush = 1;
This patch enables UDP GRO regardless of whether a GRO-capable socket is present. With this, GRO is done by default for the local input and forwarding paths. Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com> --- include/net/udp.h | 2 +- net/ipv4/udp_offload.c | 38 ++++++++++++++++---------------------- net/ipv6/udp_offload.c | 10 ++++++++-- 3 files changed, 25 insertions(+), 25 deletions(-)