@@ -547,10 +547,8 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_rt;
- }
skb = vlan_hwaccel_push_inside(skb);
if (!skb) {
@@ -558,11 +556,9 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
goto free_rt;
}
- skb = udp_tunnel_handle_offloads(skb, csum, 0, false);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, csum, false);
+ if (err)
goto free_rt;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
gnvh->ver = GENEVE_VER;
gnvh->opt_len = opt_len / 4;
@@ -578,6 +574,7 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
return 0;
free_rt:
+ kfree_skb(skb);
ip_rt_put(rt);
return err;
}
@@ -31,11 +31,27 @@ static inline int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
}
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
-struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
- bool csum_help, int gso_type_mask,
- void (*fix_segment)(struct sk_buff *));
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
+ bool csum_help, int gso_type_mask,
+ void (*fix_segment)(struct sk_buff *));
+
+/* This is required to compile the upstream gre.h. gre_handle_offloads()
+ * is defined in gre.h and needs iptunnel_handle_offloads(). This provides
+ * a default signature for that function.
+ * The rpl prefix is to keep the OVS build happy.
+ */
+#define iptunnel_handle_offloads rpl_iptunnel_handle_offloads
+struct sk_buff *rpl_iptunnel_handle_offloads(struct sk_buff *skb,
+ bool csum_help,
+ int gso_type_mask);
+#else
+
+#define ovs_iptunnel_handle_offloads(skb, csum_help, gso_type_mask, fix_segment) \
+ iptunnel_handle_offloads(skb, gso_type_mask)
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
#define iptunnel_xmit rpl_iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
@@ -43,9 +59,6 @@ int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
#else
-#define ovs_iptunnel_handle_offloads(skb, csum_help, gso_type_mask, fix_segment) \
- iptunnel_handle_offloads(skb, csum_help, gso_type_mask)
-
#define rpl_iptunnel_xmit iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
@@ -53,14 +66,6 @@ int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
#endif /* 3.18 */
-/* This is not required for OVS on kernel older than 3.18, but gre.h
- * header file needs this declaration for function gre_handle_offloads().
- * So it is defined for all kernel version.
- */
-#define rpl_iptunnel_handle_offloads iptunnel_handle_offloads
-struct sk_buff *rpl_iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
- int gso_type_mask);
-
#ifndef TUNNEL_CSUM
#define TUNNEL_CSUM __cpu_to_be16(0x01)
#define TUNNEL_ROUTING __cpu_to_be16(0x02)
@@ -6,21 +6,10 @@
#include <net/dst_metadata.h>
#include <linux/netdev_features.h>
+
#ifdef HAVE_UDP_TUNNEL_IPV6
#include_next <net/udp_tunnel.h>
-static inline struct sk_buff *
-rpl_udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum,
- int type, bool is_vxlan)
-{
- if (skb_is_gso(skb) && skb_is_encapsulated(skb)) {
- kfree_skb(skb);
- return ERR_PTR(-ENOSYS);
- }
- return udp_tunnel_handle_offloads(skb, udp_csum);
-}
-#define udp_tunnel_handle_offloads rpl_udp_tunnel_handle_offloads
-
#else
#include <net/ip_tunnels.h>
@@ -84,20 +73,58 @@ int rpl_udp_tunnel_xmit_skb(struct rtable *rt,
#define udp_tunnel_sock_release rpl_udp_tunnel_sock_release
void rpl_udp_tunnel_sock_release(struct socket *sock);
+#define udp_tunnel_encap_enable(sock) udp_encap_enable()
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define udp_tunnel6_xmit_skb rpl_udp_tunnel6_xmit_skb
+int rpl_udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be16 src_port,
+ __be16 dst_port, bool nocheck);
+#endif
+
+static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
+ skb_shinfo(skb)->gso_type |= uh->check ?
+ SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/* This handles the return-type change in the upstream handle-offload
+ * functions.
+ */
+static inline int
+rpl_udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum,
+ bool is_vxlan)
+{
+ if (skb_is_gso(skb) && skb_is_encapsulated(skb)) {
+ kfree_skb(skb);
+ return -ENOSYS;
+ }
+ return udp_tunnel_handle_offloads(skb, udp_csum);
+}
+
+#else
void ovs_udp_gso(struct sk_buff *skb);
void ovs_udp_csum_gso(struct sk_buff *skb);
-#define udp_tunnel_encap_enable(sock) udp_encap_enable()
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
- bool udp_csum,
- int type,
- bool is_vxlan)
+static inline int rpl_udp_tunnel_handle_offloads(struct sk_buff *skb,
+ bool udp_csum,
+ bool is_vxlan)
{
+ int type = 0;
+
void (*fix_segment)(struct sk_buff *);
if (skb_is_gso(skb) && skb_is_encapsulated(skb)) {
kfree_skb(skb);
- return ERR_PTR(-ENOSYS);
+ return -ENOSYS;
}
type |= udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
@@ -113,27 +140,9 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
return ovs_iptunnel_handle_offloads(skb, udp_csum, type, fix_segment);
}
-
-#if IS_ENABLED(CONFIG_IPV6)
-#define udp_tunnel6_xmit_skb rpl_udp_tunnel6_xmit_skb
-int rpl_udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be16 src_port,
- __be16 dst_port, bool nocheck);
-#endif
-
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
- struct udphdr *uh;
-
- uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
- skb_shinfo(skb)->gso_type |= uh->check ?
- SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
#endif
+#define udp_tunnel_handle_offloads rpl_udp_tunnel_handle_offloads
static inline void ovs_udp_tun_rx_dst(struct ip_tunnel_info *info,
struct sk_buff *skb,
unsigned short family,
@@ -171,7 +171,8 @@ static int gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
return 0;
}
-#ifndef HAVE_GRE_HANDLE_OFFLOADS
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+/* gre_handle_offloads() has a different return type on older kernels. */
static void gre_nop_fix(struct sk_buff *skb) { }
static void gre_csum_fix(struct sk_buff *skb)
@@ -193,7 +194,7 @@ static bool is_gre_gso(struct sk_buff *skb)
return skb_is_gso(skb);
}
-static struct sk_buff *rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
+static int rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
int type = gre_csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE;
gso_fix_segment_t fix_segment;
@@ -213,12 +214,11 @@ static bool is_gre_gso(struct sk_buff *skb)
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM);
}
-static struct sk_buff *rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
+static int rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
- if (skb_is_gso(skb) && skb_is_encapsulated(skb)) {
- kfree_skb(skb);
- return ERR_PTR(-ENOSYS);
- }
+ if (skb_is_gso(skb) && skb_is_encapsulated(skb))
+ return -ENOSYS;
+
#undef gre_handle_offloads
return gre_handle_offloads(skb, gre_csum);
}
@@ -318,11 +318,9 @@ netdev_tx_t rpl_gre_fb_xmit(struct sk_buff *skb)
}
/* Push Tunnel header. */
- skb = rpl_gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
- if (IS_ERR(skb)) {
- skb = NULL;
+ err = rpl_gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+ if (err)
goto err_free_rt;
- }
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
@@ -87,9 +87,9 @@ int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(rpl_iptunnel_xmit);
-struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
- bool csum_help, int gso_type_mask,
- void (*fix_segment)(struct sk_buff *))
+int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
+ bool csum_help, int gso_type_mask,
+ void (*fix_segment)(struct sk_buff *))
{
int err;
@@ -113,7 +113,7 @@ struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
if (unlikely(err))
goto error;
skb_shinfo(skb)->gso_type |= gso_type_mask;
- return skb;
+ return 0;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
@@ -133,10 +133,9 @@ struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
} else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
- return skb;
+ return 0;
error:
- kfree_skb(skb);
- return ERR_PTR(err);
+ return err;
}
EXPORT_SYMBOL_GPL(ovs_iptunnel_handle_offloads);
#endif
@@ -344,12 +344,9 @@ netdev_tx_t rpl_lisp_xmit(struct sk_buff *skb)
skb_reset_mac_header(skb);
skb->vlan_tci = 0;
- skb = udp_tunnel_handle_offloads(skb, false, 0, false);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- skb = NULL;
+ err = udp_tunnel_handle_offloads(skb, false, false);
+ if (err)
goto err_free_rt;
- }
src_port = htons(get_src_port(net, skb));
dst_port = lisp_dev->dst_port;
@@ -72,9 +72,6 @@ void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
void __percpu *p;
int i;
- /* older kernel do not allow all GFP flags, specifically atomic
- * allocation.
- */
if (gfp & ~(GFP_KERNEL | __GFP_ZERO))
return NULL;
p = __alloc_percpu(size, align);
@@ -1030,6 +1030,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
__be16 src_port, __be16 dst_port, __be32 vni,
struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
+ void (*fix_segment)(struct sk_buff *);
struct vxlanhdr *vxh;
int min_headroom;
int err;
@@ -1074,9 +1075,11 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
goto err;
}
- skb = udp_tunnel_handle_offloads(skb, udp_sum, type, true);
- if (IS_ERR(skb)) {
- err = -EINVAL;
+ type |= udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ fix_segment = udp_sum ? ovs_udp_gso : ovs_udp_csum_gso;
+ err = ovs_iptunnel_handle_offloads(skb, udp_sum, type, fix_segment);
+ if (err) {
+ kfree_skb(skb);
goto err;
}
@@ -1123,6 +1126,7 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
__be16 src_port, __be16 dst_port, __be32 vni,
struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
+ void (*fix_segment)(struct sk_buff *);
struct vxlanhdr *vxh;
int min_headroom;
int err;
@@ -1162,10 +1166,13 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
if (WARN_ON(!skb))
return -ENOMEM;
- skb = udp_tunnel_handle_offloads(skb, udp_sum, type, true);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
+ type |= udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ fix_segment = udp_sum ? ovs_udp_gso : ovs_udp_csum_gso;
+ err = ovs_iptunnel_handle_offloads(skb, udp_sum, type, fix_segment);
+ if (err) {
+ kfree_skb(skb);
+ return err;
+ }
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
vxh->vx_vni = vni;
There is a return type change in the upstream handle-offload functions. The following patch brings these changes in. This is a backport of aed069df ("ip_tunnel_core: iptunnel_handle_offloads returns int and doesn't free skb") I have also removed duplicate definitions of tunnel_handle_offloads() from the ip-tunnel header. Signed-off-by: Pravin B Shelar <pshelar@ovn.org> --- datapath/linux/compat/geneve.c | 11 ++-- datapath/linux/compat/include/net/ip_tunnels.h | 35 ++++++----- datapath/linux/compat/include/net/udp_tunnel.h | 83 ++++++++++++++------------ datapath/linux/compat/ip_gre.c | 20 +++---- datapath/linux/compat/ip_tunnels_core.c | 13 ++-- datapath/linux/compat/lisp.c | 7 +-- datapath/linux/compat/utils.c | 3 - datapath/linux/compat/vxlan.c | 21 ++++--- 8 files changed, 101 insertions(+), 92 deletions(-)