From patchwork Fri Dec 21 07:53:32 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Steffen Klassert X-Patchwork-Id: 1017379 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=secunet.com Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 43Lgrw0cM3z9sLt for ; Fri, 21 Dec 2018 18:54:56 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731709AbeLUHxo (ORCPT ); Fri, 21 Dec 2018 02:53:44 -0500 Received: from a.mx.secunet.com ([62.96.220.36]:48908 "EHLO a.mx.secunet.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1731337AbeLUHxn (ORCPT ); Fri, 21 Dec 2018 02:53:43 -0500 Received: from localhost (localhost [127.0.0.1]) by a.mx.secunet.com (Postfix) with ESMTP id B91FD201D7; Fri, 21 Dec 2018 08:53:42 +0100 (CET) X-Virus-Scanned: by secunet Received: from a.mx.secunet.com ([127.0.0.1]) by localhost (a.mx.secunet.com [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id EXKNVtNBHJDF; Fri, 21 Dec 2018 08:53:42 +0100 (CET) Received: from mail-essen-01.secunet.de (mail-essen-01.secunet.de [10.53.40.204]) (using TLSv1 with cipher ECDHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by a.mx.secunet.com (Postfix) with ESMTPS id 4293C201B4; Fri, 21 Dec 2018 08:53:42 +0100 (CET) Received: from gauss2.secunet.de (10.182.7.193) by mail-essen-01.secunet.de (10.53.40.204) with Microsoft SMTP Server id 14.3.408.0; Fri, 21 Dec 2018 08:53:41 +0100 Received: by gauss2.secunet.de (Postfix, from userid 1000) id 5335E318270E; Fri, 
21 Dec 2018 08:53:41 +0100 (CET) From: Steffen Klassert To: CC: Steffen Klassert , Willem de Bruijn , Paolo Abeni , "Jason A. Donenfeld" Subject: [PATCH RFC 1/3] net: Prepare GSO return values for fraglist GSO. Date: Fri, 21 Dec 2018 08:53:32 +0100 Message-ID: <20181221075334.9000-2-steffen.klassert@secunet.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20181221075334.9000-1-steffen.klassert@secunet.com> References: <20181221075334.9000-1-steffen.klassert@secunet.com> MIME-Version: 1.0 X-G-Data-MailSecurity-for-Exchange-State: 0 X-G-Data-MailSecurity-for-Exchange-Error: 0 X-G-Data-MailSecurity-for-Exchange-Sender: 23 X-G-Data-MailSecurity-for-Exchange-Server: d65e63f7-5c15-413f-8f63-c0d707471c93 X-EXCLAIMER-MD-CONFIG: 2c86f778-e09b-4440-8b15-867914633a10 X-G-Data-MailSecurity-for-Exchange-Guid: 171B3972-47D7-4E26-AC55-D996A3C050F9 Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org On fraglist GSO, we don't need to clone the original skb. So we don't have anything to return to free. Prepare GSO so that it frees the original skb only if the return pointer really changed. Fraglist GSO frees the original skb itself on error and returns -EREMOTE in this case. 
Signed-off-by: Steffen Klassert --- include/net/udp.h | 8 ++++++-- net/core/dev.c | 11 +++++++---- net/ipv4/ip_output.c | 3 ++- net/xfrm/xfrm_output.c | 3 ++- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index fd6d948755c8..f89b95c3f91e 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -482,11 +482,15 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, atomic_add(segs_nr, &sk->sk_drops); SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr); - kfree_skb(skb); + + if (PTR_ERR(segs) != -EREMOTE) + kfree_skb(skb); return NULL; } - consume_skb(skb); + if (segs != skb) + consume_skb(skb); + return segs; } diff --git a/net/core/dev.c b/net/core/dev.c index 754284873355..53df5ac7c9b2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3030,7 +3030,8 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, } rcu_read_unlock(); - __skb_push(skb, skb->data - skb_mac_header(skb)); + if (segs != skb) + __skb_push(skb, skb->data - skb_mac_header(skb)); return segs; } @@ -3099,7 +3100,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, segs = skb_mac_gso_segment(skb, features); - if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) + if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) skb_warn_bad_offload(skb); return segs; @@ -3345,8 +3346,10 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device segs = skb_gso_segment(skb, features); if (IS_ERR(segs)) { - goto out_kfree_skb; - } else if (segs) { + if (PTR_ERR(segs) != -EREMOTE) + goto out_kfree_skb; + goto out_null; + } else if (segs && segs != skb) { consume_skb(skb); skb = segs; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ab6618036afe..f4cecda6c1e8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -272,7 +272,8 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, return -ENOMEM; } - consume_skb(skb); + if (segs != 
skb) + consume_skb(skb); do { struct sk_buff *nskb = segs->next; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 4ae87c5ce2e3..1941dc2a80a0 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -183,7 +183,8 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET); segs = skb_gso_segment(skb, 0); - kfree_skb(skb); + if (segs != skb) + kfree_skb(skb); if (IS_ERR(segs)) return PTR_ERR(segs); if (segs == NULL) From patchwork Fri Dec 21 07:53:33 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Steffen Klassert X-Patchwork-Id: 1017381 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=secunet.com Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 43Lgrx4DZCz9sDr for ; Fri, 21 Dec 2018 18:54:57 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732181AbeLUHxp (ORCPT ); Fri, 21 Dec 2018 02:53:45 -0500 Received: from a.mx.secunet.com ([62.96.220.36]:48882 "EHLO a.mx.secunet.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1731366AbeLUHxp (ORCPT ); Fri, 21 Dec 2018 02:53:45 -0500 Received: from localhost (localhost [127.0.0.1]) by a.mx.secunet.com (Postfix) with ESMTP id 51A7A201E5; Fri, 21 Dec 2018 08:53:42 +0100 (CET) X-Virus-Scanned: by secunet Received: from a.mx.secunet.com ([127.0.0.1]) by localhost (a.mx.secunet.com [127.0.0.1]) (amavisd-new, port 10024) with 
ESMTP id meVdfDbr95x7; Fri, 21 Dec 2018 08:53:41 +0100 (CET) Received: from mail-essen-01.secunet.de (mail-essen-01.secunet.de [10.53.40.204]) (using TLSv1 with cipher ECDHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by a.mx.secunet.com (Postfix) with ESMTPS id CEB50201BB; Fri, 21 Dec 2018 08:53:41 +0100 (CET) Received: from gauss2.secunet.de (10.182.7.193) by mail-essen-01.secunet.de (10.53.40.204) with Microsoft SMTP Server id 14.3.408.0; Fri, 21 Dec 2018 08:53:41 +0100 Received: by gauss2.secunet.de (Postfix, from userid 1000) id 56090318270F; Fri, 21 Dec 2018 08:53:41 +0100 (CET) From: Steffen Klassert To: CC: Steffen Klassert , Willem de Bruijn , Paolo Abeni , "Jason A. Donenfeld" Subject: [PATCH RFC 2/3] net: Support GRO/GSO fraglist chaining. Date: Fri, 21 Dec 2018 08:53:33 +0100 Message-ID: <20181221075334.9000-3-steffen.klassert@secunet.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20181221075334.9000-1-steffen.klassert@secunet.com> References: <20181221075334.9000-1-steffen.klassert@secunet.com> MIME-Version: 1.0 X-G-Data-MailSecurity-for-Exchange-State: 0 X-G-Data-MailSecurity-for-Exchange-Error: 0 X-G-Data-MailSecurity-for-Exchange-Sender: 23 X-G-Data-MailSecurity-for-Exchange-Server: d65e63f7-5c15-413f-8f63-c0d707471c93 X-EXCLAIMER-MD-CONFIG: 2c86f778-e09b-4440-8b15-867914633a10 X-G-Data-MailSecurity-for-Exchange-Guid: 31F8DFD3-7067-4543-8694-63AC5FD1994A Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch adds the core functions to chain/unchain GSO skbs at the frag_list pointer. This also adds a new GSO type SKB_GSO_FRAGLIST and an is_flist flag to napi_gro_cb which indicates that this flow will be GROed by fraglist chaining. 
Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 4 +- include/linux/skbuff.h | 4 ++ net/core/skbuff.c | 103 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+), 1 deletion(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index fc6ba71513be..ae907cae6461 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2294,7 +2294,8 @@ struct napi_gro_cb { /* Number of gro_receive callbacks this packet already went through */ u8 recursion_counter:4; - /* 1 bit hole */ + /* GRO is done by frag_list pointer chaining. */ + u8 is_flist:1; /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2648,6 +2649,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); +int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); static inline unsigned int skb_gro_offset(const struct sk_buff *skb) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b1831a5ca173..6a496c0dd0f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -578,6 +578,8 @@ enum { SKB_GSO_UDP = 1 << 16, SKB_GSO_UDP_L4 = 1 << 17, + + SKB_GSO_FRAGLIST = 1 << 18, }; #if BITS_PER_LONG > 32 @@ -3366,6 +3368,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); +struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, + unsigned int offset); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); diff --git a/net/core/skbuff.c 
b/net/core/skbuff.c index 40552547c69a..9ff44a3a2625 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3464,6 +3464,109 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) return head_frag; } +struct sk_buff *skb_segment_list(struct sk_buff *skb, + netdev_features_t features, + unsigned int offset) +{ + struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; + unsigned int tnl_hlen = skb_tnl_header_len(skb); + unsigned int delta_truesize = 0; + unsigned int delta_len = 0; + struct sk_buff *tail = NULL; + struct sk_buff *nskb; + + skb_push(skb, -skb_network_offset(skb) + offset); + + skb_shinfo(skb)->frag_list = NULL; + + do { + nskb = list_skb; + list_skb = list_skb->next; + + if (!tail) + skb->next = nskb; + else + tail->next = nskb; + + tail = nskb; + + delta_len += nskb->len; + delta_truesize += nskb->truesize; + + skb_push(nskb, -skb_network_offset(nskb) + offset); + + if (!secpath_exists(nskb)) + nskb->sp = secpath_get(skb->sp); + + memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); + + nskb->ip_summed = CHECKSUM_UNNECESSARY; + nskb->tstamp = skb->tstamp; + nskb->dev = skb->dev; + nskb->queue_mapping = skb->queue_mapping; + + nskb->mac_len = skb->mac_len; + nskb->mac_header = skb->mac_header; + nskb->transport_header = skb->transport_header; + nskb->network_header = skb->network_header; + skb_dst_copy(nskb, skb); + + skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); + skb_copy_from_linear_data_offset(skb, -tnl_hlen, + nskb->data - tnl_hlen, + offset + tnl_hlen); + + if (skb_needs_linearize(nskb, features) && + __skb_linearize(nskb)) { + kfree_skb_list(skb); + return ERR_PTR(-EREMOTE); + } + } while (list_skb); + + skb->truesize = skb->truesize - delta_truesize; + skb->data_len = skb->data_len - delta_len; + skb->len = skb->len - delta_len; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_gso_reset(skb); + + skb->prev = tail; + + if (skb_needs_linearize(skb, features) && + __skb_linearize(skb)) { + 
kfree_skb_list(skb); + return ERR_PTR(-EREMOTE); + } + + return skb; +} +EXPORT_SYMBOL_GPL(skb_segment_list); + +int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) +{ + if (unlikely(p->len + skb->len >= 65536)) + return -E2BIG; + + if (NAPI_GRO_CB(p)->last == p) + skb_shinfo(p)->frag_list = skb; + else + NAPI_GRO_CB(p)->last->next = skb; + + skb_pull(skb, skb_gro_offset(skb)); + + NAPI_GRO_CB(p)->last = skb; + NAPI_GRO_CB(p)->count++; + p->data_len += skb->len; + p->truesize += skb->truesize; + p->len += skb->len; + + NAPI_GRO_CB(skb)->same_flow = 1; + + return 0; +} +EXPORT_SYMBOL_GPL(skb_gro_receive_list); + /** * skb_segment - Perform protocol segmentation on skb. * @head_skb: buffer to segment From patchwork Fri Dec 21 07:53:34 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Steffen Klassert X-Patchwork-Id: 1017378 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=secunet.com Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 43Lgrt5Vbcz9sDN for ; Fri, 21 Dec 2018 18:54:54 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731895AbeLUHxo (ORCPT ); Fri, 21 Dec 2018 02:53:44 -0500 Received: from a.mx.secunet.com ([62.96.220.36]:48898 "EHLO a.mx.secunet.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1730712AbeLUHxn (ORCPT ); Fri, 21 Dec 2018 02:53:43 -0500 Received: from localhost (localhost [127.0.0.1]) by a.mx.secunet.com (Postfix) with ESMTP id A1D36201BB; Fri, 21 Dec 2018 08:53:42 +0100 (CET) 
X-Virus-Scanned: by secunet Received: from a.mx.secunet.com ([127.0.0.1]) by localhost (a.mx.secunet.com [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id VsINtz6ZKxWA; Fri, 21 Dec 2018 08:53:42 +0100 (CET) Received: from mail-essen-01.secunet.de (mail-essen-01.secunet.de [10.53.40.204]) (using TLSv1 with cipher ECDHE-RSA-AES256-SHA (256/256 bits)) (No client certificate requested) by a.mx.secunet.com (Postfix) with ESMTPS id 09ED1201D7; Fri, 21 Dec 2018 08:53:42 +0100 (CET) Received: from gauss2.secunet.de (10.182.7.193) by mail-essen-01.secunet.de (10.53.40.204) with Microsoft SMTP Server id 14.3.408.0; Fri, 21 Dec 2018 08:53:41 +0100 Received: by gauss2.secunet.de (Postfix, from userid 1000) id 5AB1B3182717; Fri, 21 Dec 2018 08:53:41 +0100 (CET) From: Steffen Klassert To: CC: Steffen Klassert , Willem de Bruijn , Paolo Abeni , "Jason A. Donenfeld" Subject: [PATCH RFC 3/3] udp: Support UDP fraglist GRO/GSO. Date: Fri, 21 Dec 2018 08:53:34 +0100 Message-ID: <20181221075334.9000-4-steffen.klassert@secunet.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20181221075334.9000-1-steffen.klassert@secunet.com> References: <20181221075334.9000-1-steffen.klassert@secunet.com> MIME-Version: 1.0 X-G-Data-MailSecurity-for-Exchange-State: 0 X-G-Data-MailSecurity-for-Exchange-Error: 0 X-G-Data-MailSecurity-for-Exchange-Sender: 23 X-G-Data-MailSecurity-for-Exchange-Server: d65e63f7-5c15-413f-8f63-c0d707471c93 X-EXCLAIMER-MD-CONFIG: 2c86f778-e09b-4440-8b15-867914633a10 X-G-Data-MailSecurity-for-Exchange-Guid: F975D9CC-3C3E-4874-9D8C-9718BDAA5979 Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch extends UDP GRO to support fraglist GRO/GSO by using the previously introduced infrastructure. All UDP packets that are not targeted to a GRO capable UDP socket are going to fraglist GRO now (local input and forward). 
Signed-off-by: Steffen Klassert --- net/ipv4/udp_offload.c | 57 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 49 insertions(+), 8 deletions(-) diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 0646d61f4fa8..9d77cc44da6b 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -187,6 +187,20 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, } EXPORT_SYMBOL(skb_udp_tunnel_segment); +static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, + netdev_features_t features) +{ + unsigned int mss = skb_shinfo(skb)->gso_size; + + skb = skb_segment_list(skb, features, skb_mac_header_len(skb)); + if (IS_ERR(skb)) + return skb; + + udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss); + + return skb; +} + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, netdev_features_t features) { @@ -199,6 +213,9 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, __sum16 check; __be16 newlen; + if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) + return __udp_gso_segment_list(gso_skb, features); + mss = skb_shinfo(gso_skb)->gso_size; if (gso_skb->len <= sizeof(*uh) + mss) return ERR_PTR(-EINVAL); @@ -351,16 +368,15 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, struct sk_buff *pp = NULL; struct udphdr *uh2; struct sk_buff *p; + int ret; /* requires non zero csum, for symmetry with GSO */ if (!uh->check) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } - /* pull encapsulating udp header */ skb_gro_pull(skb, sizeof(struct udphdr)); - skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) @@ -378,8 +394,17 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, * Under small packet flood GRO count could elsewhere grow a lot * leading to execessive truesize values */ - if (!skb_gro_receive(p, skb) && - NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX) + if (NAPI_GRO_CB(skb)->is_flist) { + if 
(!pskb_may_pull(skb, skb_gro_offset(skb))) + return NULL; + ret = skb_gro_receive_list(p, skb); + } else { + skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); + + ret = skb_gro_receive(p, skb); + } + + if (!ret && NAPI_GRO_CB(p)->count > UDP_GRO_CNT_MAX) pp = p; else if (uh->len != uh2->len) pp = p; @@ -403,10 +428,17 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, rcu_read_lock(); sk = (*lookup)(skb, uh->source, uh->dest); - if (!sk) - goto out_unlock; + if (!sk) { + NAPI_GRO_CB(skb)->is_flist = 1; + pp = call_gro_receive(udp_gro_receive_segment, head, skb); + rcu_read_unlock(); + return pp; + } + + if (!udp_sk(sk)->gro_receive) { + if (!udp_sk(sk)->gro_enabled) + NAPI_GRO_CB(skb)->is_flist = 1; - if (udp_sk(sk)->gro_enabled) { pp = call_gro_receive(udp_gro_receive_segment, head, skb); rcu_read_unlock(); return pp; @@ -456,7 +488,7 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head, { struct udphdr *uh = udp_gro_udphdr(skb); - if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key)) + if (unlikely(!uh)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. */ @@ -530,6 +562,15 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) const struct iphdr *iph = ip_hdr(skb); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); + if (NAPI_GRO_CB(skb)->is_flist) { + uh->len = htons(skb->len - nhoff); + + skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4); + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + + return 0; + } + if (uh->check) uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, iph->daddr, 0);