
[net-next,2/3] ip6_gre: Refactor ip6gre xmit codes

Message ID: 1509907145-42750-3-git-send-email-u9012063@gmail.com
State: Changes Requested, archived
Delegated to: David Miller
Series: ip6_gre: add erspan native tunnel for ipv6

Commit Message

William Tu Nov. 5, 2017, 6:39 p.m. UTC
This patch refactors ip6gre_xmit_{ipv4,ipv6}().
It is prep work for adding the ip6erspan tunnel.

Signed-off-by: William Tu <u9012063@gmail.com>
---
 net/ipv6/ip6_gre.c | 124 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 76 insertions(+), 48 deletions(-)
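
For orientation, the refactor moves the flowi6/dsfield/encap-limit setup out of ip6gre_xmit_{ipv4,ipv6} into two new helpers. Based on the diff below, the IPv4 transmit path ends up roughly in the following shape (a condensed sketch, not a verbatim copy of the resulting code):

static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/* tclass, fwmark, uid and encap-limit setup now lives in the helper */
	prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	/* ... remainder of the xmit path (__gre6_xmit() etc.) is unchanged ... */
}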

Comments

David Miller Nov. 8, 2017, 2:10 a.m. UTC | #1
From: William Tu <u9012063@gmail.com>
Date: Sun,  5 Nov 2017 10:39:04 -0800

> diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
> index 3e10c51e7e0c..8c7612f32926 100644
> --- a/net/ipv6/ip6_gre.c
> +++ b/net/ipv6/ip6_gre.c
> @@ -497,6 +497,79 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
>  					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
>  }
>  
> +static inline void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
> +					    struct net_device *dev,
> +					    struct flowi6 *fl6, __u8 *dsfield,
> +					    int *encap_limit)

Please do not use 'inline' in foo.c files, let the compiler decide.

Thank you.
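
For illustration, dropping 'inline' as requested and leaving the decision to the compiler would mean declaring the new helper simply as below (a sketch of the expected change, not the actual v2 resubmission):

static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	/* body as in this patch; the compiler will inline a small static
	 * function like this on its own when it is worthwhile.
	 */
	...
}

The same applies to prepare_ip6gre_xmit_ipv6().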
William Tu Nov. 8, 2017, 1:06 p.m. UTC | #2
On Tue, Nov 7, 2017 at 6:10 PM, David Miller <davem@davemloft.net> wrote:
> From: William Tu <u9012063@gmail.com>
> Date: Sun,  5 Nov 2017 10:39:04 -0800
>
>> diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
>> index 3e10c51e7e0c..8c7612f32926 100644
>> --- a/net/ipv6/ip6_gre.c
>> +++ b/net/ipv6/ip6_gre.c
>> @@ -497,6 +497,79 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
>>                                       csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
>>  }
>>
>> +static inline void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
>> +                                         struct net_device *dev,
>> +                                         struct flowi6 *fl6, __u8 *dsfield,
>> +                                         int *encap_limit)
>
> Please do not use 'inline' in foo.c files, let the compiler decide.
>
> Thank you.

Thanks. I will remove it and submit v2.
William

Patch

diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3e10c51e7e0c..8c7612f32926 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -497,6 +497,79 @@  static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
+static inline void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
+					    struct net_device *dev,
+					    struct flowi6 *fl6, __u8 *dsfield,
+					    int *encap_limit)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	const struct iphdr *iph = ip_hdr(skb);
+
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		*encap_limit = t->parms.encap_limit;
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv4_get_dsfield(iph);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+}
+
+static inline int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+					   struct net_device *dev,
+					   struct flowi6 *fl6, __u8 *dsfield,
+					   int *encap_limit)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	__u16 offset;
+
+	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+	ipv6h = ipv6_hdr(skb);
+
+	if (offset > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+
+		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2);
+			return -1;
+		}
+		*encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+		*encap_limit = t->parms.encap_limit;
+	}
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv6_get_dsfield(ipv6h);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+		fl6->flowlabel |= ip6_flowlabel(ipv6h);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+	return 0;
+}
+
 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 			       struct net_device *dev, __u8 dsfield,
 			       struct flowi6 *fl6, int encap_limit,
@@ -533,7 +606,6 @@  static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	const struct iphdr  *iph = ip_hdr(skb);
 	int encap_limit = -1;
 	struct flowi6 fl6;
 	__u8 dsfield;
@@ -542,21 +614,7 @@  static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
-	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv4_get_dsfield(iph);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, &dsfield, &encap_limit);
 
 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
 	if (err)
@@ -580,7 +638,6 @@  static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	int encap_limit = -1;
-	__u16 offset;
 	struct flowi6 fl6;
 	__u8 dsfield;
 	__u32 mtu;
@@ -589,37 +646,8 @@  static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
 		return -1;
 
-	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
-	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
-	ipv6h = ipv6_hdr(skb);
-
-	if (offset > 0) {
-		struct ipv6_tlv_tnl_enc_lim *tel;
-		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
-		if (tel->encap_limit == 0) {
-			icmpv6_send(skb, ICMPV6_PARAMPROB,
-				    ICMPV6_HDR_FIELD, offset + 2);
-			return -1;
-		}
-		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv6_get_dsfield(ipv6h);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
-		fl6.flowlabel |= ip6_flowlabel(ipv6h);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
+		return -1;
 
 	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
 		return -1;