diff mbox series

[v2] ipvs: allow tunneling with gue encapsulation

Message ID 20190316060635.2594-1-hengqing.hu@gmail.com
State Awaiting Upstream
Delegated to: David Miller
Headers show
Series [v2] ipvs: allow tunneling with gue encapsulation | expand

Commit Message

Jacky Hu March 16, 2019, 6:06 a.m. UTC
ipip packets are blocked in some public cloud environments, this patch
allows gue encapsulation with the tunneling method, which would make
tunneling work in those environments.

Signed-off-by: Jacky Hu <hengqing.hu@gmail.com>
---
 include/net/ip_vs.h             |  5 ++
 include/uapi/linux/ip_vs.h      | 17 +++++++
 net/netfilter/ipvs/ip_vs_ctl.c  | 20 +++++++-
 net/netfilter/ipvs/ip_vs_xmit.c | 85 +++++++++++++++++++++++++++++++--
 4 files changed, 122 insertions(+), 5 deletions(-)

Comments

kernel test robot March 16, 2019, 10:58 p.m. UTC | #1
Hi Jacky,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on ipvs-next/master]
[also build test WARNING on v5.0 next-20190306]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jacky-Hu/ipvs-allow-tunneling-with-gue-encapsulation/20190317-042939
base:   https://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs-next.git master
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'


sparse warnings: (new ones prefixed by >>)

>> net/netfilter/ipvs/ip_vs_ctl.c:835:42: sparse: incorrect type in argument 2 (different base types) @@    expected int [signed] i @@    got restricted __be16 [usertype] tun_port @@
   net/netfilter/ipvs/ip_vs_ctl.c:835:42:    expected int [signed] i
   net/netfilter/ipvs/ip_vs_ctl.c:835:42:    got restricted __be16 [usertype] tun_port
   net/netfilter/ipvs/ip_vs_ctl.c:1175:44: sparse: expression using sizeof(void)
>> net/netfilter/ipvs/ip_vs_ctl.c:3329:41: sparse: incorrect type in assignment (different base types) @@    expected restricted __be16 [usertype] tun_port @@    got unsigned short @@
   net/netfilter/ipvs/ip_vs_ctl.c:3329:41:    expected restricted __be16 [usertype] tun_port
   net/netfilter/ipvs/ip_vs_ctl.c:3329:41:    got unsigned short
   net/netfilter/ipvs/ip_vs_ctl.c:1291:27: sparse: dereference of noderef expression

vim +835 net/netfilter/ipvs/ip_vs_ctl.c

   805	
   806	/*
   807	 *	Update a destination in the given service
   808	 */
   809	static void
   810	__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
   811			    struct ip_vs_dest_user_kern *udest, int add)
   812	{
   813		struct netns_ipvs *ipvs = svc->ipvs;
   814		struct ip_vs_service *old_svc;
   815		struct ip_vs_scheduler *sched;
   816		int conn_flags;
   817	
   818		/* We cannot modify an address and change the address family */
   819		BUG_ON(!add && udest->af != dest->af);
   820	
   821		if (add && udest->af != svc->af)
   822			ipvs->mixed_address_family_dests++;
   823	
   824		/* keep the last_weight with latest non-0 weight */
   825		if (add || udest->weight != 0)
   826			atomic_set(&dest->last_weight, udest->weight);
   827	
   828		/* set the weight and the flags */
   829		atomic_set(&dest->weight, udest->weight);
   830		conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
   831		conn_flags |= IP_VS_CONN_F_INACTIVE;
   832	
   833		/* set the tunnel info */
   834		atomic_set(&dest->tun_type, udest->tun_type);
 > 835		atomic_set(&dest->tun_port, udest->tun_port);
   836	
   837		/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
   838		if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
   839			conn_flags |= IP_VS_CONN_F_NOOUTPUT;
   840		} else {
   841			/*
   842			 *    Put the real service in rs_table if not present.
   843			 *    For now only for NAT!
   844			 */
   845			ip_vs_rs_hash(ipvs, dest);
   846			/* FTP-NAT requires conntrack for mangling */
   847			if (svc->port == FTPPORT)
   848				ip_vs_register_conntrack(svc);
   849		}
   850		atomic_set(&dest->conn_flags, conn_flags);
   851	
   852		/* bind the service */
   853		old_svc = rcu_dereference_protected(dest->svc, 1);
   854		if (!old_svc) {
   855			__ip_vs_bind_svc(dest, svc);
   856		} else {
   857			if (old_svc != svc) {
   858				ip_vs_zero_stats(&dest->stats);
   859				__ip_vs_bind_svc(dest, svc);
   860				__ip_vs_svc_put(old_svc, true);
   861			}
   862		}
   863	
   864		/* set the dest status flags */
   865		dest->flags |= IP_VS_DEST_F_AVAILABLE;
   866	
   867		if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold)
   868			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
   869		dest->u_threshold = udest->u_threshold;
   870		dest->l_threshold = udest->l_threshold;
   871	
   872		dest->af = udest->af;
   873	
   874		spin_lock_bh(&dest->dst_lock);
   875		__ip_vs_dst_cache_reset(dest);
   876		spin_unlock_bh(&dest->dst_lock);
   877	
   878		if (add) {
   879			ip_vs_start_estimator(svc->ipvs, &dest->stats);
   880			list_add_rcu(&dest->n_list, &svc->destinations);
   881			svc->num_dests++;
   882			sched = rcu_dereference_protected(svc->scheduler, 1);
   883			if (sched && sched->add_dest)
   884				sched->add_dest(svc, dest);
   885		} else {
   886			sched = rcu_dereference_protected(svc->scheduler, 1);
   887			if (sched && sched->upd_dest)
   888				sched->upd_dest(svc, dest);
   889		}
   890	}
   891	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
diff mbox series

Patch

diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0d2e0bb9a94..56c1770b00fe 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -603,6 +603,9 @@  struct ip_vs_dest_user_kern {
 
 	/* Address family of addr */
 	u16			af;
+
+	u16			tun_type;	/* tunnel type */
+	__be16			tun_port;	/* tunnel port */
 };
 
 
@@ -663,6 +666,8 @@  struct ip_vs_dest {
 	atomic_t		conn_flags;	/* flags to copy to conn */
 	atomic_t		weight;		/* server weight */
 	atomic_t		last_weight;	/* server latest weight */
+	atomic_t		tun_type;	/* tunnel type */
+	atomic_t		tun_port;	/* tunnel port */
 
 	refcount_t		refcnt;		/* reference counter */
 	struct ip_vs_stats      stats;          /* statistics */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 1c916b2f89dc..b43297691337 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -124,6 +124,13 @@ 
 
 #define IP_VS_PEDATA_MAXLEN     255
 
+/* Tunnel types */
+enum {
+	IP_VS_CONN_F_TUNNEL_TYPE_IPIP = 0,	/* IPIP */
+	IP_VS_CONN_F_TUNNEL_TYPE_GUE,		/* GUE */
+	IP_VS_CONN_F_TUNNEL_TYPE_MAX,
+};
+
 /*
  *	The struct ip_vs_service_user and struct ip_vs_dest_user are
  *	used to set IPVS rules through setsockopt.
@@ -155,6 +162,9 @@  struct ip_vs_dest_user {
 	/* thresholds for active connections */
 	__u32		u_threshold;	/* upper threshold */
 	__u32		l_threshold;	/* lower threshold */
+
+	__u16		tun_type;	/* tunnel type */
+	__be16		tun_port;	/* tunnel port */
 };
 
 
@@ -220,6 +230,9 @@  struct ip_vs_dest_entry {
 	__u32		u_threshold;	/* upper threshold */
 	__u32		l_threshold;	/* lower threshold */
 
+	__u16		tun_type;	/* tunnel type */
+	__be16		tun_port;	/* tunnel port */
+
 	__u32		activeconns;	/* active connections */
 	__u32		inactconns;	/* inactive connections */
 	__u32		persistconns;	/* persistent connections */
@@ -392,6 +405,10 @@  enum {
 
 	IPVS_DEST_ATTR_STATS64,		/* nested attribute for dest stats */
 
+	IPVS_DEST_ATTR_TUN_TYPE,	/* tunnel type */
+
+	IPVS_DEST_ATTR_TUN_PORT,	/* tunnel port */
+
 	__IPVS_DEST_ATTR_MAX,
 };
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 432141f04af3..48509b03a5ea 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -830,6 +830,10 @@  __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 	conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
 	conn_flags |= IP_VS_CONN_F_INACTIVE;
 
+	/* set the tunnel info */
+	atomic_set(&dest->tun_type, udest->tun_type);
+	atomic_set(&dest->tun_port, udest->tun_port);
+
 	/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
 	if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
 		conn_flags |= IP_VS_CONN_F_NOOUTPUT;
@@ -2869,6 +2873,8 @@  static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
 	[IPVS_DEST_ATTR_PERSIST_CONNS]	= { .type = NLA_U32 },
 	[IPVS_DEST_ATTR_STATS]		= { .type = NLA_NESTED },
 	[IPVS_DEST_ATTR_ADDR_FAMILY]	= { .type = NLA_U16 },
+	[IPVS_DEST_ATTR_TUN_TYPE]	= { .type = NLA_U8 },
+	[IPVS_DEST_ATTR_TUN_PORT]	= { .type = NLA_U16 },
 };
 
 static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
@@ -3172,6 +3178,10 @@  static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
 			 IP_VS_CONN_F_FWD_MASK)) ||
 	    nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
 			atomic_read(&dest->weight)) ||
+	    nla_put_u8(skb, IPVS_DEST_ATTR_TUN_TYPE,
+		       atomic_read(&dest->tun_type)) ||
+	    nla_put_u16(skb, IPVS_DEST_ATTR_TUN_PORT,
+			atomic_read(&dest->tun_port)) ||
 	    nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
 	    nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
 	    nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
@@ -3294,12 +3304,14 @@  static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
 	/* If a full entry was requested, check for the additional fields */
 	if (full_entry) {
 		struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
-			      *nla_l_thresh;
+			      *nla_l_thresh, *nla_tun_type, *nla_tun_port;
 
 		nla_fwd		= attrs[IPVS_DEST_ATTR_FWD_METHOD];
 		nla_weight	= attrs[IPVS_DEST_ATTR_WEIGHT];
 		nla_u_thresh	= attrs[IPVS_DEST_ATTR_U_THRESH];
 		nla_l_thresh	= attrs[IPVS_DEST_ATTR_L_THRESH];
+		nla_tun_type	= attrs[IPVS_DEST_ATTR_TUN_TYPE];
+		nla_tun_port	= attrs[IPVS_DEST_ATTR_TUN_PORT];
 
 		if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
 			return -EINVAL;
@@ -3309,6 +3321,12 @@  static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
 		udest->weight = nla_get_u32(nla_weight);
 		udest->u_threshold = nla_get_u32(nla_u_thresh);
 		udest->l_threshold = nla_get_u32(nla_l_thresh);
+
+		if (nla_tun_type)
+			udest->tun_type = nla_get_u8(nla_tun_type);
+
+		if (nla_tun_port)
+			udest->tun_port = nla_get_u16(nla_tun_port);
 	}
 
 	return 0;
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 473cce2a5231..3efb46a14163 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -32,6 +32,7 @@ 
 #include <linux/slab.h>
 #include <linux/tcp.h>                  /* for tcphdr */
 #include <net/ip.h>
+#include <net/gue.h>
 #include <net/tcp.h>                    /* for csum_tcpudp_magic */
 #include <net/udp.h>
 #include <net/icmp.h>                   /* for icmp_send */
@@ -989,6 +990,42 @@  static inline int __tun_gso_type_mask(int encaps_af, int orig_af)
 	}
 }
 
+static int
+__build_gue_header(struct net *net, struct sk_buff *skb,
+		   struct ip_vs_conn *cp, __u8 *next_protocol)
+{
+	__be16 dport;
+	__be16 sport;
+	struct udphdr  *udph;	/* Our new UDP header */
+	struct guehdr  *gueh;	/* Our new GUE header */
+
+	skb_push(skb, sizeof(struct guehdr));
+
+	gueh = (struct guehdr *)skb->data;
+
+	gueh->control = 0;
+	gueh->version = 0;
+	gueh->hlen = 0;
+	gueh->flags = 0;
+	gueh->proto_ctype = *next_protocol;
+
+	skb_push(skb, sizeof(struct udphdr));
+	skb_reset_transport_header(skb);
+
+	udph = udp_hdr(skb);
+
+	dport = htons(atomic_read(&cp->dest->tun_port));
+	sport = udp_flow_src_port(net, skb, 0, 0, false);
+	udph->dest = dport;
+	udph->source = sport;
+	udph->len = htons(skb->len);
+	udph->check = 0;
+
+	*next_protocol = IPPROTO_UDP;
+
+	return 0;
+}
+
 /*
  *   IP Tunneling transmitter
  *
@@ -1025,6 +1062,7 @@  ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	struct iphdr  *iph;			/* Our new IP header */
 	unsigned int max_headroom;		/* The extra header space needed */
 	int ret, local;
+	int tun_type, gso_type;
 
 	EnterFunction(10);
 
@@ -1033,7 +1071,7 @@  ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_CONNECT |
 				   IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
-	if (local < 0)
+	if (!cp->dest || local < 0)
 		goto tx_error;
 	if (local)
 		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
@@ -1046,6 +1084,13 @@  ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 */
 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
 
+	tun_type = atomic_read(&cp->dest->tun_type);
+
+	switch (tun_type) {
+	case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+		max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
+	}
+
 	/* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
 	dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
 	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
@@ -1054,11 +1099,23 @@  ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (IS_ERR(skb))
 		goto tx_error;
 
-	if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
+	switch (tun_type) {
+	case IP_VS_CONN_F_TUNNEL_TYPE_IPIP:
+		gso_type = __tun_gso_type_mask(AF_INET, cp->af);
+	case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+		gso_type = SKB_GSO_UDP_TUNNEL;
+	}
+
+	if (iptunnel_handle_offloads(skb, gso_type))
 		goto tx_error;
 
 	skb->transport_header = skb->network_header;
 
+	switch (tun_type) {
+	case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+		__build_gue_header(dev_net(tdev), skb, cp, &next_protocol);
+	}
+
 	skb_push(skb, sizeof(struct iphdr));
 	skb_reset_network_header(skb);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1112,6 +1169,7 @@  ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	struct ipv6hdr  *iph;		/* Our new IP header */
 	unsigned int max_headroom;	/* The extra header space needed */
 	int ret, local;
+	int tun_type, gso_type;
 
 	EnterFunction(10);
 
@@ -1121,7 +1179,7 @@  ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
 				      IP_VS_RT_MODE_TUNNEL);
-	if (local < 0)
+	if (!cp->dest || local < 0)
 		goto tx_error;
 	if (local)
 		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
@@ -1134,17 +1192,36 @@  ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 */
 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
 
+	tun_type = atomic_read(&cp->dest->tun_type);
+
+	switch (tun_type) {
+	case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+		max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
+	}
+
 	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
 					 &next_protocol, &payload_len,
 					 &dsfield, &ttl, NULL);
 	if (IS_ERR(skb))
 		goto tx_error;
 
-	if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
+	switch (tun_type) {
+	case IP_VS_CONN_F_TUNNEL_TYPE_IPIP:
+		gso_type = __tun_gso_type_mask(AF_INET, cp->af);
+	case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+		gso_type = SKB_GSO_UDP_TUNNEL;
+	}
+
+	if (iptunnel_handle_offloads(skb, gso_type))
 		goto tx_error;
 
 	skb->transport_header = skb->network_header;
 
+	switch (tun_type) {
+	case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+		__build_gue_header(dev_net(tdev), skb, cp, &next_protocol);
+	}
+
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));