
[RFC,v2,2/3] net: Support GRO/GSO fraglist chaining.

Message ID 20190128085025.14532-3-steffen.klassert@secunet.com
State RFC
Delegated to: David Miller
Series Support fraglist GRO/GSO

Commit Message

Steffen Klassert Jan. 28, 2019, 8:50 a.m. UTC
This patch adds the core functions to chain/unchain
GSO skbs at the frag_list pointer. It also adds
a new GSO type, SKB_GSO_FRAGLIST, and an is_flist
flag to napi_gro_cb which indicates that the
flow will be GROed by fraglist chaining.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
---
 include/linux/netdevice.h |   4 +-
 include/linux/skbuff.h    |   4 ++
 net/core/dev.c            |   2 +-
 net/core/skbuff.c         | 106 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 114 insertions(+), 2 deletions(-)
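
The two new helpers are meant to be called from a protocol's
GRO/GSO handlers. A minimal sketch of both call sites (the function
names and the flow matching are hypothetical, and passing the MAC
header length as the offset is an assumption about the intended
calling convention; the real users are wired up elsewhere in the
series):

/* Receive side, from a gro_receive handler: p is a packet already
 * held by GRO that matched skb's flow.  Setting is_flist tells the
 * completion path that the packets were chained at frag_list rather
 * than merged into page frags.
 */
static int gro_merge_by_fraglist(struct sk_buff *p, struct sk_buff *skb)
{
	NAPI_GRO_CB(p)->is_flist = 1;

	/* Chains skb behind p; fails with -E2BIG once the merged
	 * packet would reach 64K.
	 */
	return skb_gro_receive_list(p, skb);
}

/* Transmit side, from a gso_segment callback: fraglist-chained
 * packets are unchained by skb_segment_list() instead of being
 * segmented by copying.
 */
static struct sk_buff *proto_gso_segment(struct sk_buff *skb,
					 netdev_features_t features)
{
	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
		return skb_segment_list(skb, features,
					skb_mac_header_len(skb));

	return skb_segment(skb, features);
}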

Comments

Willem de Bruijn Jan. 28, 2019, 8:50 p.m. UTC | #1
On Mon, Jan 28, 2019 at 2:53 AM Steffen Klassert
<steffen.klassert@secunet.com> wrote:
>
> This patch adds the core functions to chain/unchain
> GSO skbs at the frag_list pointer. It also adds
> a new GSO type, SKB_GSO_FRAGLIST, and an is_flist
> flag to napi_gro_cb which indicates that the
> flow will be GROed by fraglist chaining.
>
> Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>

> +struct sk_buff *skb_segment_list(struct sk_buff *skb,
> +                                netdev_features_t features,
> +                                unsigned int offset)
> +{
> +       struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
> +       unsigned int tnl_hlen = skb_tnl_header_len(skb);
> +       unsigned int delta_truesize = 0;
> +       unsigned int delta_len = 0;
> +       struct sk_buff *tail = NULL;
> +       struct sk_buff *nskb;
> +
> +       skb_push(skb, -skb_network_offset(skb) + offset);
> +
> +       skb_shinfo(skb)->frag_list = NULL;
> +
> +       do {
> +               nskb = list_skb;
> +               list_skb = list_skb->next;
> +
> +               if (!tail)
> +                       skb->next = nskb;
> +               else
> +                       tail->next = nskb;
> +
> +               tail = nskb;
> +
> +               delta_len += nskb->len;
> +               delta_truesize += nskb->truesize;
> +
> +               skb_push(nskb, -skb_network_offset(nskb) + offset);
> +
> +               if (!secpath_exists(nskb))
> +                       __skb_ext_copy(nskb, skb);
> +
> +               memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
> +
> +               nskb->ip_summed = CHECKSUM_NONE;
> +               nskb->csum_valid = 1;
> +               nskb->tstamp = skb->tstamp;
> +               nskb->dev = skb->dev;
> +               nskb->queue_mapping = skb->queue_mapping;
> +
> +               nskb->mac_len = skb->mac_len;
> +               nskb->mac_header = skb->mac_header;
> +               nskb->transport_header = skb->transport_header;
> +               nskb->network_header = skb->network_header;
> +               skb_dst_copy(nskb, skb);
> +
> +               skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
> +               skb_copy_from_linear_data_offset(skb, -tnl_hlen,
> +                                                nskb->data - tnl_hlen,
> +                                                offset + tnl_hlen);
> +
> +               if (skb_needs_linearize(nskb, features) &&
> +                   __skb_linearize(nskb)) {
> +                       kfree_skb_list(skb->next);
> +                       skb->next = NULL;
> +                       return ERR_PTR(-ENOMEM);
> +               }
> +       } while (list_skb);
> +
> +       skb->truesize = skb->truesize - delta_truesize;
> +       skb->data_len = skb->data_len - delta_len;
> +       skb->len = skb->len - delta_len;
> +
> +       skb_gso_reset(skb);
> +
> +       skb->prev = tail;
> +
> +       if (skb_needs_linearize(skb, features) &&
> +           __skb_linearize(skb)) {
> +               skb->next = NULL;
> +               kfree_skb_list(skb->next);

inverse order: skb->next is cleared before kfree_skb_list() walks it,
so the chained segments are leaked

also, I would probably deduplicate with the same branch above in a new
err_linearize: block
Steffen Klassert Feb. 13, 2019, 11:49 a.m. UTC | #2
On Mon, Jan 28, 2019 at 02:50:34PM -0600, Willem de Bruijn wrote:
> On Mon, Jan 28, 2019 at 2:53 AM Steffen Klassert
> <steffen.klassert@secunet.com> wrote:
> > +
> > +       if (skb_needs_linearize(skb, features) &&
> > +           __skb_linearize(skb)) {
> > +               skb->next = NULL;
> > +               kfree_skb_list(skb->next);
> 
> inverse order

Oh yes, apparently.

> 
> also, I would probably deduplicate with the same branch above in a new
> err_linearize: block

Will do that.

Thanks for the review!
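
For reference, a sketch of the deduplicated error path the review
converges on, with the free and the pointer reset in the correct
order (applied against the v2 code above, not part of the posted
patch):

	/* in the per-segment loop */
	if (skb_needs_linearize(nskb, features) &&
	    __skb_linearize(nskb))
		goto err_linearize;

	/* after the loop */
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto err_linearize;

	skb_get(skb);

	return skb;

err_linearize:
	/* Free the chained segments first, then detach them. */
	kfree_skb_list(skb->next);
	skb->next = NULL;
	return ERR_PTR(-ENOMEM);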

Patch

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1377d085ef99..050cff782fbc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2300,7 +2300,8 @@ struct napi_gro_cb {
 	/* Number of gro_receive callbacks this packet already went through */
 	u8 recursion_counter:4;
 
-	/* 1 bit hole */
+	/* GRO is done by frag_list pointer chaining. */
+	u8	is_flist:1;
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2660,6 +2661,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2a57a365c711..b35a209c9c55 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -578,6 +578,8 @@ enum {
 	SKB_GSO_UDP = 1 << 16,
 
 	SKB_GSO_UDP_L4 = 1 << 17,
+
+	SKB_GSO_FRAGLIST = 1 << 18,
 };
 
 #if BITS_PER_LONG > 32
@@ -3369,6 +3371,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+				 unsigned int offset);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
diff --git a/net/core/dev.c b/net/core/dev.c
index 1b5a4410be0e..90b480b5bdf6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3101,7 +3101,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
 	segs = skb_mac_gso_segment(skb, features);
 
-	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
+	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
 		skb_warn_bad_offload(skb);
 
 	return segs;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 37317ffec146..7cd5e9da21bd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3462,6 +3462,112 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
 	return head_frag;
 }
 
+struct sk_buff *skb_segment_list(struct sk_buff *skb,
+				 netdev_features_t features,
+				 unsigned int offset)
+{
+	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
+	unsigned int tnl_hlen = skb_tnl_header_len(skb);
+	unsigned int delta_truesize = 0;
+	unsigned int delta_len = 0;
+	struct sk_buff *tail = NULL;
+	struct sk_buff *nskb;
+
+	skb_push(skb, -skb_network_offset(skb) + offset);
+
+	skb_shinfo(skb)->frag_list = NULL;
+
+	do {
+		nskb = list_skb;
+		list_skb = list_skb->next;
+
+		if (!tail)
+			skb->next = nskb;
+		else
+			tail->next = nskb;
+
+		tail = nskb;
+
+		delta_len += nskb->len;
+		delta_truesize += nskb->truesize;
+
+		skb_push(nskb, -skb_network_offset(nskb) + offset);
+
+		if (!secpath_exists(nskb))
+			__skb_ext_copy(nskb, skb);
+
+		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+
+		nskb->ip_summed = CHECKSUM_NONE;
+		nskb->csum_valid = 1;
+		nskb->tstamp = skb->tstamp;
+		nskb->dev = skb->dev;
+		nskb->queue_mapping = skb->queue_mapping;
+
+		nskb->mac_len = skb->mac_len;
+		nskb->mac_header = skb->mac_header;
+		nskb->transport_header = skb->transport_header;
+		nskb->network_header = skb->network_header;
+		skb_dst_copy(nskb, skb);
+
+		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+						 nskb->data - tnl_hlen,
+						 offset + tnl_hlen);
+
+		if (skb_needs_linearize(nskb, features) &&
+		    __skb_linearize(nskb)) {
+			kfree_skb_list(skb->next);
+			skb->next = NULL;
+			return ERR_PTR(-ENOMEM);
+		}
+	} while (list_skb);
+
+	skb->truesize = skb->truesize - delta_truesize;
+	skb->data_len = skb->data_len - delta_len;
+	skb->len = skb->len - delta_len;
+
+	skb_gso_reset(skb);
+
+	skb->prev = tail;
+
+	if (skb_needs_linearize(skb, features) &&
+	    __skb_linearize(skb)) {
+		skb->next = NULL;
+		kfree_skb_list(skb->next);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	skb_get(skb);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(skb_segment_list);
+
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+	if (unlikely(p->len + skb->len >= 65536))
+		return -E2BIG;
+
+	if (NAPI_GRO_CB(p)->last == p)
+		skb_shinfo(p)->frag_list = skb;
+	else
+		NAPI_GRO_CB(p)->last->next = skb;
+
+	skb_pull(skb, skb_gro_offset(skb));
+
+	NAPI_GRO_CB(p)->last = skb;
+	NAPI_GRO_CB(p)->count++;
+	p->data_len += skb->len;
+	p->truesize += skb->truesize;
+	p->len += skb->len;
+
+	NAPI_GRO_CB(skb)->same_flow = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive_list);
+
 /**
  *	skb_segment - Perform protocol segmentation on skb.
  *	@head_skb: buffer to segment