From patchwork Tue Sep 19 00:38:51 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815215 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="aGB3SSwH"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tK6LRDz9s78 for ; Tue, 19 Sep 2017 10:39:53 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751210AbdISAjd (ORCPT ); Mon, 18 Sep 2017 20:39:33 -0400 Received: from mail-pf0-f169.google.com ([209.85.192.169]:54195 "EHLO mail-pf0-f169.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751016AbdISAjc (ORCPT ); Mon, 18 Sep 2017 20:39:32 -0400 Received: by mail-pf0-f169.google.com with SMTP id x78so1102618pff.10 for ; Mon, 18 Sep 2017 17:39:32 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=p4/uHanYkSLTDDyJ1/R/fpW4eCV1EFLfoTQrShRbRC8=; b=aGB3SSwH67gCDaTTQ0FDQsgngY3ipBgrS1Pz83AYOc/URQ18q5HEf1EV94/6r5Ctf4 kpGLXlVEYyji7lRJgF0x6S5UEhLo20ZVDNYr+s+8yCUkxepFpTyvvkH6GvKd2sDLAv29 Xdtqcn3qbjVwT5/hliApZhpmD5rByFPS5Blftu0pExXdkwdoRIDqeJrc7wLBFekF9VqJ x2zHQZARaIts47OSjSlMsNwqsrOyIDV78Ha9EAM1SjNEYjcjpUtYUGhYZhYThL2Nhhuq phLuPRNPpC3q3ZsOVrG4/LZptuH9vGfhx3SjBVvNq6Y00CQhh9cm5eH88lpiXRcXWEHv gZCg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; 
d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=p4/uHanYkSLTDDyJ1/R/fpW4eCV1EFLfoTQrShRbRC8=; b=CUw32eg/RQl9vdK8Yy4FqNvAsZwS1VdxV1uKBeC9a6pPSyLg5/SA7qqJKXejkfTXgk AzV5TElbQRsgJYdOqyV9pSUKL5wESTbB4N3SbOI/5+JGDWBLY+GgZQGDZnJKhzrLGF77 hQETtT4rUF3Wr9mD68t+C1rbrKm0zriB2Lsd/shcpPfo2STDs6wWNF+Rijc55AYqDSiP llpaKI6Fw23D8Qz3iQgDCHndknNGJcCkEfc9qy/CifmVHLRg+wYCjA/2ZwuFpS++93vQ EPS4D3fU6pqKP90L5qRNjxK3zDxxRUA6kRxZxJ+NsJjXGd4e1Y6uAfGBQFe9n14vLckE 3J0g== X-Gm-Message-State: AHPjjUgF+zh/+Pyw7R1sC01q24y6YdkugAXa+0VBlrSYfznM4UIDO7Lb FqbewgPUuTLfQB9F X-Google-Smtp-Source: AOwi7QDhjPzJ7ATztsS5JtBG8Z3TJi6DU9Ikbx+L667Y/GKW6L0OzxWP6TxvyvW+Wnw32hyI7VzNnQ== X-Received: by 10.101.83.4 with SMTP id m4mr353506pgq.266.1505781571883; Mon, 18 Sep 2017 17:39:31 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. [73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.30 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:30 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 01/14] iptunnel: Add common functions to get a tunnel route Date: Mon, 18 Sep 2017 17:38:51 -0700 Message-Id: <20170919003904.5124-2-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org ip_tunnel_get_route and ip6_tnl_get_route are create to return routes for a tunnel. These functions are derived from the VXLAN functions. 
Signed-off-by: Tom Herbert --- include/net/ip6_tunnel.h | 33 +++++++++++++++++++++++++++++++++ include/net/ip_tunnels.h | 33 +++++++++++++++++++++++++++++++++ net/ipv4/ip_tunnel.c | 41 +++++++++++++++++++++++++++++++++++++++++ net/ipv6/ip6_tunnel.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 150 insertions(+) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 08fbc7f7d8d7..233097bf07a2 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -142,6 +142,39 @@ __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, struct net *ip6_tnl_get_link_net(const struct net_device *dev); int ip6_tnl_get_iflink(const struct net_device *dev); int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); +struct dst_entry *__ip6_tnl_get_route(struct net_device *dev, + struct sk_buff *skb, struct sock *sk, + u8 proto, int oif, u8 tos, __be32 label, + const struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 dport, __be16 sport, + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info, + bool use_cache); + +static inline struct dst_entry *ip6_tnl_get_route(struct net_device *dev, + struct sk_buff *skb, struct sock *sk, u8 proto, + int oif, u8 tos, __be32 label, + const struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 dport, __be16 sport, + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info) +{ + bool use_cache = (ip_tunnel_dst_cache_usable(skb, info) && + (!tos || info)); + + if (use_cache) { + struct dst_entry *ndst = dst_cache_get_ip6(dst_cache, saddr); + + if (ndst) + return ndst; + } + + return __ip6_tnl_get_route(dev, skb, sk, proto, oif, tos, label, + daddr, saddr, dport, sport, dst_cache, + info, use_cache); +} static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device *dev) diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 992652856fe8..91d5150a1044 100644 --- a/include/net/ip_tunnels.h +++ 
b/include/net/ip_tunnels.h @@ -284,6 +284,39 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p, __u32 fwmark); void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); +struct rtable *__ip_tunnel_get_route(struct net_device *dev, + struct sk_buff *skb, u8 proto, + int oif, u8 tos, + __be32 daddr, __be32 *saddr, + __be16 dport, __be16 sport, + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info, + bool use_cache); + +static inline struct rtable *ip_tunnel_get_route(struct net_device *dev, + struct sk_buff *skb, u8 proto, + int oif, u8 tos, + __be32 daddr, __be32 *saddr, + __be16 dport, __be16 sport, + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info) +{ + bool use_cache = (ip_tunnel_dst_cache_usable(skb, info) && + (!tos || info)); + + if (use_cache) { + struct rtable *rt; + + rt = dst_cache_get_ip4(dst_cache, saddr); + if (rt) + return rt; + } + + return __ip_tunnel_get_route(dev, skb, proto, oif, tos, + daddr, saddr, dport, sport, + dst_cache, info, use_cache); +} + struct ip_tunnel_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *e); int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index e9805ad664ac..f0f35333febd 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -935,6 +935,47 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) } EXPORT_SYMBOL_GPL(ip_tunnel_ioctl); +struct rtable *__ip_tunnel_get_route(struct net_device *dev, + struct sk_buff *skb, u8 proto, + int oif, u8 tos, + __be32 daddr, __be32 *saddr, + __be16 dport, __be16 sport, + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info, + bool use_cache) +{ + struct rtable *rt = NULL; + struct flowi4 fl4; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = oif; + fl4.flowi4_tos = RT_TOS(tos); + fl4.flowi4_mark = skb->mark; + fl4.flowi4_proto = proto; + fl4.daddr = daddr; + fl4.saddr = 
*saddr; + fl4.fl4_dport = dport; + fl4.fl4_sport = sport; + + rt = ip_route_output_key(dev_net(dev), &fl4); + if (likely(!IS_ERR(rt))) { + if (rt->dst.dev == dev) { + netdev_dbg(dev, "circular route to %pI4\n", &daddr); + ip_rt_put(rt); + return ERR_PTR(-ELOOP); + } + + *saddr = fl4.saddr; + if (use_cache) + dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); + } else { + netdev_dbg(dev, "no route to %pI4\n", &daddr); + return ERR_PTR(-ENETUNREACH); + } + return rt; +} +EXPORT_SYMBOL_GPL(__ip_tunnel_get_route); + int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) { struct ip_tunnel *tunnel = netdev_priv(dev); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ae73164559d5..9a02b62c808b 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1663,6 +1663,49 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } +struct dst_entry *__ip6_tnl_get_route(struct net_device *dev, + struct sk_buff *skb, struct sock *sk, + u8 proto, int oif, u8 tos, __be32 label, + const struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 dport, __be16 sport, + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info, + bool use_cache) +{ + struct dst_entry *ndst; + struct flowi6 fl6; + int err; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_oif = oif; + fl6.daddr = *daddr; + fl6.saddr = *saddr; + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = proto; + fl6.fl6_dport = dport; + fl6.fl6_sport = sport; + + err = ipv6_stub->ipv6_dst_lookup(dev_net(dev), sk, &ndst, &fl6); + if (unlikely(err < 0)) { + netdev_dbg(dev, "no route to %pI6\n", daddr); + return ERR_PTR(-ENETUNREACH); + } + + if (unlikely(ndst->dev == dev)) { + netdev_dbg(dev, "circular route to %pI6\n", daddr); + dst_release(ndst); + return ERR_PTR(-ELOOP); + } + + *saddr = fl6.saddr; + if (use_cache) + dst_cache_set_ip6(dst_cache, ndst, saddr); + return ndst; +} 
+EXPORT_SYMBOL_GPL(__ip6_tnl_get_route); + /** * ip6_tnl_change_mtu - change mtu manually for tunnel device * @dev: virtual device associated with tunnel From patchwork Tue Sep 19 00:38:52 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815216 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="QqdKlShl"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tL41yQz9s5L for ; Tue, 19 Sep 2017 10:39:54 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751302AbdISAjh (ORCPT ); Mon, 18 Sep 2017 20:39:37 -0400 Received: from mail-pg0-f44.google.com ([74.125.83.44]:43205 "EHLO mail-pg0-f44.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751227AbdISAjg (ORCPT ); Mon, 18 Sep 2017 20:39:36 -0400 Received: by mail-pg0-f44.google.com with SMTP id u18so1096845pgo.0 for ; Mon, 18 Sep 2017 17:39:36 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=atgSc7cSWpfP0yeG8RydMP5L2g1hZzx81Z5EoLNlTI8=; b=QqdKlShliK0UaTZquUSdD7W1c/pKjyg0pZpcc/YyRUUTMg54qmFLeAddyOa357xMzE 2dJeJ6N/j7vSYXo1E2HB1av48uWWleSOqu42c223xpJz2QDjOlsfxjeEFtvP621rg1L/ 8Y2BqpINjTqanfTZLNMa77C0QHxvIiwZttnA4kJ1Gvmy6Cwec7dCuMQfahzg6oI2Q5CU DjUUlWfukTtrOVh8fB2w8Ep3wJPh5v6kOEWGbLNc/vEhOEl+3oSm/pJxj+ZJV6tIYQus 
0Rhz+p4m4wZrqHB+H+mL7K6sUBvuH9bcO5xV+ayuRH0HA65+AVJ4D3c4UIXfOi6IuLo/ gB4g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=atgSc7cSWpfP0yeG8RydMP5L2g1hZzx81Z5EoLNlTI8=; b=M+w36j++6UyduU/+iDmYUpTN4Q+M6be09UG3GHe9QNxPAGyraSxHv9MPoisgV9S5cw ITDd8/GwR+7AUn+iY3PDLrcNTHpcr/0ojcNt3HYJdUx71FqjkwR9QOR2U642UgQQbccS 6AQDyIdcfyKi6iJWlaffHIXCX1UbDt+HnS81FkLX94t+XOkb+6EBGomW2cL+/sxJvUz+ t78OlMr9BTTT5ZtC0Ev4KmaMofGEFm8EJMfGI6puA017iN7daGSJ48iW6/LbegMu5HYp kN056SnvMX9obCbqMRQhd8WJWF4AZq5aHUgW1xlDpAa3pxjuPUHOh19jLLCaPPPK7KQb 27FQ== X-Gm-Message-State: AHPjjUgc0R/Be/PKD6Ricx2YKq+Xs1yu5wyhPiFXQ+khe9lUiAstkvZu 2avGx+1xurHcHQhh X-Google-Smtp-Source: AOwi7QCpBhwz8GvunrWFY9+1KmWZrg6sssfvjmbV/cMECwLlgvbE1U6rkvlpubnvGnkXj41yM/3+Ng== X-Received: by 10.98.82.74 with SMTP id g71mr330648pfb.300.1505781576075; Mon, 18 Sep 2017 17:39:36 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. [73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.34 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:35 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 02/14] vxlan: Call common functions to get tunnel routes Date: Mon, 18 Sep 2017 17:38:52 -0700 Message-Id: <20170919003904.5124-3-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Call ip_tunnel_get_route and ip6_tnl_get_route to handle getting a route and dealing with the dst_cache. 
Signed-off-by: Tom Herbert --- drivers/net/vxlan.c | 84 ++++------------------------------------------------- 1 file changed, 5 insertions(+), 79 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d7c49cf1d5e9..810caa9adf37 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1867,47 +1867,11 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct rtable *rt = NULL; - struct flowi4 fl4; - if (!sock4) return ERR_PTR(-EIO); - if (tos && !info) - use_cache = false; - if (use_cache) { - rt = dst_cache_get_ip4(dst_cache, saddr); - if (rt) - return rt; - } - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = oif; - fl4.flowi4_tos = RT_TOS(tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = daddr; - fl4.saddr = *saddr; - fl4.fl4_dport = dport; - fl4.fl4_sport = sport; - - rt = ip_route_output_key(vxlan->net, &fl4); - if (likely(!IS_ERR(rt))) { - if (rt->dst.dev == dev) { - netdev_dbg(dev, "circular route to %pI4\n", &daddr); - ip_rt_put(rt); - return ERR_PTR(-ELOOP); - } - - *saddr = fl4.saddr; - if (use_cache) - dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); - } else { - netdev_dbg(dev, "no route to %pI4\n", &daddr); - return ERR_PTR(-ENETUNREACH); - } - return rt; + return ip_tunnel_get_route(dev, skb, IPPROTO_UDP, oif, tos, daddr, + saddr, dport, sport, dst_cache, info); } #if IS_ENABLED(CONFIG_IPV6) @@ -1922,50 +1886,12 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct dst_entry *ndst; - struct flowi6 fl6; - int err; - if (!sock6) return ERR_PTR(-EIO); - if (tos && !info) - use_cache = false; - if (use_cache) { - ndst = dst_cache_get_ip6(dst_cache, saddr); - if (ndst) - return ndst; - } - - 
memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = oif; - fl6.daddr = *daddr; - fl6.saddr = *saddr; - fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); - fl6.flowi6_mark = skb->mark; - fl6.flowi6_proto = IPPROTO_UDP; - fl6.fl6_dport = dport; - fl6.fl6_sport = sport; - - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - sock6->sock->sk, - &ndst, &fl6); - if (unlikely(err < 0)) { - netdev_dbg(dev, "no route to %pI6\n", daddr); - return ERR_PTR(-ENETUNREACH); - } - - if (unlikely(ndst->dev == dev)) { - netdev_dbg(dev, "circular route to %pI6\n", daddr); - dst_release(ndst); - return ERR_PTR(-ELOOP); - } - - *saddr = fl6.saddr; - if (use_cache) - dst_cache_set_ip6(dst_cache, ndst, saddr); - return ndst; + return ip6_tnl_get_route(dev, skb, sock6->sock->sk, IPPROTO_UDP, oif, + tos, label, daddr, saddr, dport, sport, + dst_cache, info); } #endif From patchwork Tue Sep 19 00:38:53 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815217 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="d8whlsBw"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tM1k45z9s78 for ; Tue, 19 Sep 2017 10:39:55 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751361AbdISAjm (ORCPT ); Mon, 18 Sep 2017 20:39:42 -0400 Received: from mail-pf0-f169.google.com ([209.85.192.169]:48467 "EHLO mail-pf0-f169.google.com" 
rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751227AbdISAjj (ORCPT ); Mon, 18 Sep 2017 20:39:39 -0400 Received: by mail-pf0-f169.google.com with SMTP id n24so1107405pfk.5 for ; Mon, 18 Sep 2017 17:39:39 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=dm9swcGuIk75jL41bv8tNdZ2xCw67/rn0Fh5lKTZlYM=; b=d8whlsBwHBY63KJgj4x+folyJmeI2RxzNS8Wl6J7DEiEYKZiJntHxchMkxkiJDbqgU nOhHtXzjxlFghB33s9oT63ilnK/9a3bscn7OfHYuMwIR6+rw7d4nx9+2hKXHRHNuLxSW jUTaExhAWIAVadsZSnJ4R7N9+4J1re6aD/7RBkVEMHxVOdVO0OqZgQgHETiM/wIfeDxy Q+kpmOGjZJGVgfJBWt2qNmdH13llj/iXetOof/Dj2HExfTQJvCN1Pdpn2JiItsQmACqz u6LDDrJ8lZzDz8XIWn020cvXElMNLeqVJndv11TLFE8bfCaqrkFh639saJnzCADioxyo 7kjA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=dm9swcGuIk75jL41bv8tNdZ2xCw67/rn0Fh5lKTZlYM=; b=HD6Xj7y7PolKU+uVv20HH2Z9f7Of4HtQVnkoh/AvQ8DYoAvsgccJuSc3TIPshQtZ8w FzY7TtdFSCWw1K4AIIaDZF3x86agKqTSYV97Q4aZOAy4tTZwpTtXD+4MTt0HGT0HCTNa HAmdeLnrVDByAhj21eHovp871uPeybMeP8I4LWRcIq5Ouzc+0sGc+ydURlCZVls5OIMC RhvpGcMH4r5EVbS0JKGDmEsnT92aSUnFee9N/jdxYwTa99+HB5UWZGU4R8Z6YWzrWeNf yXzLMD14wt8fZt3MY9nj8QcvoBjpT6DMxA4JmtEnMTCLuA9DG9UnIQJLL+vspAdqOD0i rIQg== X-Gm-Message-State: AHPjjUifv9aevT0NtmBGJLV5uEz06s1su1BPyDO/wWYRSPk1CDv5OZA8 WcnwmKufABv+ZVjeWLg= X-Google-Smtp-Source: AOwi7QDTVJSonMWs14gqYSNTvi7D2aynR1oDg+CD71DnxZvpSO83LMFGTosADnETUF9tJ5MPplBqnA== X-Received: by 10.98.76.70 with SMTP id z67mr347087pfa.78.1505781579074; Mon, 18 Sep 2017 17:39:39 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.37 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:38 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 03/14] gtp: Call common functions to get tunnel routes and add dst_cache Date: Mon, 18 Sep 2017 17:38:53 -0700 Message-Id: <20170919003904.5124-4-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Call ip_tunnel_get_route and dst_cache to pdp context which should improve performance by obviating the need to perform a route lookup on every packet. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 59 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index f38e32a7ec9c..95df3bcebbb2 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -63,6 +63,8 @@ struct pdp_ctx { atomic_t tx_seq; struct rcu_head rcu_head; + + struct dst_cache dst_cache; }; /* One instance of the GTP device. 
*/ @@ -379,20 +381,6 @@ static void gtp_dev_uninit(struct net_device *dev) free_percpu(dev->tstats); } -static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, - const struct sock *sk, - __be32 daddr) -{ - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_oif = sk->sk_bound_dev_if; - fl4->daddr = daddr; - fl4->saddr = inet_sk(sk)->inet_saddr; - fl4->flowi4_tos = RT_CONN_FLAGS(sk); - fl4->flowi4_proto = sk->sk_protocol; - - return ip_route_output_key(sock_net(sk), fl4); -} - static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; @@ -479,6 +467,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, struct rtable *rt; struct flowi4 fl4; struct iphdr *iph; + struct sock *sk; + __be32 saddr; __be16 df; int mtu; @@ -498,19 +488,27 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, } netdev_dbg(dev, "found PDP context %p\n", pctx); - rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr); - if (IS_ERR(rt)) { - netdev_dbg(dev, "no route to SSGN %pI4\n", - &pctx->peer_addr_ip4.s_addr); - dev->stats.tx_carrier_errors++; - goto err; - } + sk = pctx->sk; + saddr = inet_sk(sk)->inet_saddr; - if (rt->dst.dev == dev) { - netdev_dbg(dev, "circular route to SSGN %pI4\n", - &pctx->peer_addr_ip4.s_addr); - dev->stats.collisions++; - goto err_rt; + rt = ip_tunnel_get_route(dev, skb, sk->sk_protocol, + sk->sk_bound_dev_if, RT_CONN_FLAGS(sk), + pctx->peer_addr_ip4.s_addr, &saddr, + pktinfo->gtph_port, pktinfo->gtph_port, + &pctx->dst_cache, NULL); + + if (IS_ERR(rt)) { + if (rt == ERR_PTR(-ELOOP)) { + netdev_dbg(dev, "circular route to SSGN %pI4\n", + &pctx->peer_addr_ip4.s_addr); + dev->stats.collisions++; + goto err_rt; + } else { + netdev_dbg(dev, "no route to SSGN %pI4\n", + &pctx->peer_addr_ip4.s_addr); + dev->stats.tx_carrier_errors++; + goto err; + } } skb_dst_drop(skb); @@ -543,7 +541,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device 
*dev, goto err_rt; } - gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); + gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev); gtp_push_header(skb, pktinfo); return 0; @@ -917,6 +915,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, struct pdp_ctx *pctx; bool found = false; __be32 ms_addr; + int err; ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; @@ -951,6 +950,12 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, if (pctx == NULL) return -ENOMEM; + err = dst_cache_init(&pctx->dst_cache, GFP_KERNEL); + if (err) { + kfree(pctx); + return err; + } + sock_hold(sk); pctx->sk = sk; pctx->dev = gtp->dev; From patchwork Tue Sep 19 00:38:54 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815218 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="xhHOua33"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tM69cdz9s5L for ; Tue, 19 Sep 2017 10:39:55 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751378AbdISAjo (ORCPT ); Mon, 18 Sep 2017 20:39:44 -0400 Received: from mail-pf0-f176.google.com ([209.85.192.176]:53059 "EHLO mail-pf0-f176.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751227AbdISAjm (ORCPT ); Mon, 18 Sep 2017 20:39:42 -0400 Received: by 
mail-pf0-f176.google.com with SMTP id p87so1103621pfj.9 for ; Mon, 18 Sep 2017 17:39:42 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=cf1PHU9itUm9XgenGeS6L3jYR1k9PqG510l88SnyXO0=; b=xhHOua336d0A21CIkcB+giRb01/r1BsZ4ex9IgXjEIBy79cobIfgKl1//qyraJ0KgP F1pNjaMTCz7aD4bmV6T04vXezJPo4iGeUS6CFeT0ISZQGGYPHz6zi3UMP6i+sxN6Q3QH V6id6YuM+8A45ilL+wZGYNpUH0PN3CbR2HfYcJcwxYG1fUTZdCxsFivG2Ew0gdmVaHLS ifeXODWj9K8JA47gseRd0CMdCgsV5bAwJUaUEtIynO3iHz43JGUQef2qW4EsC2B4ScBW BYfAvwMXAyfk0HUA9XWiGPuNtwmJWxMGqZ0+6VI+6dowfz0l3sO4sWai5iSfHdoSaKaC CBTQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=cf1PHU9itUm9XgenGeS6L3jYR1k9PqG510l88SnyXO0=; b=pbO6v2KeGzks0mIjOGHB1edhWiPb/jWyrml+98e6K+iC6++3QS5Z50SO5YLYuUQqmD 7ozc1fK8dmN+0RuZ0d+nNY3q9tE11BgbJN//h1xmaxX11XTVO0Rr8uiY2iqFmPg4beal Ai9ilJzF1XfNng3p+jGStv0i7qIn2qbIFBlHNfePytbM8M18ToFS3VZpQhy3uvyA5SOk TUThxtDa5VsvZ8O6j4XZQmbqkaQhOgt/ynGwsDqTJ+G4iOoJpW89cACUSDqsWiB7Roxb NOpqGq76chrenICB3DosVhwIT0mOLyFC3poXeAOUqRyqU1Ol1jJ2Lmpg/tX5M0gxcvOW MaDA== X-Gm-Message-State: AHPjjUit5LwifCk6w1EaagDnbWNe0myvs18PreeDHGYw/fq5GRPQNjAk r4jhOHj25JJdbWqW X-Google-Smtp-Source: AOwi7QBfZxXrYU1v7o6Mqtdh5kc3V6jgYsweYNo5qg/9H7xsRRPDiYiUl3Dh3vglk52IPIhotuZEoA== X-Received: by 10.98.26.201 with SMTP id a192mr309961pfa.311.1505781581978; Mon, 18 Sep 2017 17:39:41 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.40 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:41 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 04/14] gtp: udp recv clean up Date: Mon, 18 Sep 2017 17:38:54 -0700 Message-Id: <20170919003904.5124-5-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Create separate UDP receive functions for GTP version 0 and version 1. Set encap_rcv appropriately when configuring a socket. Also, convert to using gro_cells. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 130 +++++++++++++++++++++++++++++------------------------- 1 file changed, 71 insertions(+), 59 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 95df3bcebbb2..1de2ea6217ea 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -80,6 +80,8 @@ struct gtp_dev { unsigned int hash_size; struct hlist_head *tid_hash; struct hlist_head *addr_hash; + + struct gro_cells gro_cells; }; static unsigned int gtp_net_id __read_mostly; @@ -217,55 +219,83 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); - netif_rx(skb); + gro_cells_receive(>p->gro_cells, skb); + return 0; } -/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ -static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) +/* UDP encapsulation receive handler for GTPv0-U . See net/ipv4/udp.c. + * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket. 
+ */ +static int gtp0_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { + struct gtp_dev *gtp = rcu_dereference_sk_user_data(sk); unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; + if (!gtp) + goto pass; + if (!pskb_may_pull(skb, hdrlen)) - return -1; + goto drop; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if ((gtp0->flags >> 5) != GTP_V0) - return 1; + goto pass; if (gtp0->type != GTP_TPDU) - return 1; + goto pass; + + netdev_dbg(gtp->dev, "received GTP0 packet\n"); pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - return 1; + goto pass; + } + + if (!gtp_rx(pctx, skb, hdrlen, gtp->role)) { + /* Successfully received */ + return 0; } - return gtp_rx(pctx, skb, hdrlen, gtp->role); +drop: + kfree_skb(skb); + return 0; + +pass: + return 1; } -static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) +/* UDP encapsulation receive handler for GTPv0-U . See net/ipv4/udp.c. + * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket. + */ +static int gtp1u_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { + struct gtp_dev *gtp = rcu_dereference_sk_user_data(sk); unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; + if (!gtp) + goto pass; + if (!pskb_may_pull(skb, hdrlen)) - return -1; + goto drop; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); if ((gtp1->flags >> 5) != GTP_V1) - return 1; + goto pass; if (gtp1->type != GTP_TPDU) - return 1; + goto pass; + + netdev_dbg(gtp->dev, "received GTP1 packet\n"); /* From 29.060: "This field shall be present if and only if any one or * more of the S, PN and E flags are set.". @@ -278,17 +308,27 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) /* Make sure the header is larger enough, including extensions. 
*/ if (!pskb_may_pull(skb, hdrlen)) - return -1; + goto drop; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - return 1; + goto pass; + } + + if (!gtp_rx(pctx, skb, hdrlen, gtp->role)) { + /* Successfully received */ + return 0; } - return gtp_rx(pctx, skb, hdrlen, gtp->role); +drop: + kfree_skb(skb); + return 0; + +pass: + return 1; } static void gtp_encap_destroy(struct sock *sk) @@ -317,49 +357,6 @@ static void gtp_encap_disable(struct gtp_dev *gtp) gtp_encap_disable_sock(gtp->sk1u); } -/* UDP encapsulation receive handler. See net/ipv4/udp.c. - * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket. - */ -static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) -{ - struct gtp_dev *gtp; - int ret = 0; - - gtp = rcu_dereference_sk_user_data(sk); - if (!gtp) - return 1; - - netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); - - switch (udp_sk(sk)->encap_type) { - case UDP_ENCAP_GTP0: - netdev_dbg(gtp->dev, "received GTP0 packet\n"); - ret = gtp0_udp_encap_recv(gtp, skb); - break; - case UDP_ENCAP_GTP1U: - netdev_dbg(gtp->dev, "received GTP1U packet\n"); - ret = gtp1u_udp_encap_recv(gtp, skb); - break; - default: - ret = -1; /* Shouldn't happen. 
*/ - } - - switch (ret) { - case 1: - netdev_dbg(gtp->dev, "pass up to the process\n"); - break; - case 0: - break; - case -1: - netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); - kfree_skb(skb); - ret = 0; - break; - } - - return ret; -} - static int gtp_dev_init(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); @@ -627,6 +624,8 @@ static void gtp_link_setup(struct net_device *dev) sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(struct gtp0_header); + + gro_cells_init(>p->gro_cells, dev); } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); @@ -683,6 +682,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); + gro_cells_destroy(>p->gro_cells); gtp_encap_disable(gtp); gtp_hashtable_free(gtp); list_del_rcu(>p->list); @@ -804,9 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, sk = sock->sk; sock_hold(sk); + switch (type) { + case UDP_ENCAP_GTP0: + tuncfg.encap_rcv = gtp0_udp_encap_recv; + break; + case UDP_ENCAP_GTP1U: + tuncfg.encap_rcv = gtp1u_udp_encap_recv; + break; + default: + pr_debug("Unknown encap type %u\n", type); + sk = ERR_PTR(-EINVAL); + goto out_sock; + } + tuncfg.sk_user_data = gtp; tuncfg.encap_type = type; - tuncfg.encap_rcv = gtp_encap_recv; tuncfg.encap_destroy = gtp_encap_destroy; setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); From patchwork Tue Sep 19 00:38:55 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815219 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; 
unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="SGte6+bP"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tN4S5wz9s78 for ; Tue, 19 Sep 2017 10:39:56 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751450AbdISAjr (ORCPT ); Mon, 18 Sep 2017 20:39:47 -0400 Received: from mail-pg0-f43.google.com ([74.125.83.43]:44863 "EHLO mail-pg0-f43.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751377AbdISAjo (ORCPT ); Mon, 18 Sep 2017 20:39:44 -0400 Received: by mail-pg0-f43.google.com with SMTP id j16so1094024pga.1 for ; Mon, 18 Sep 2017 17:39:44 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=vqzZUqff18NcNFh3+wi3bAvTQXk8vwMBp39XFhprHUU=; b=SGte6+bPUy8+CmpkzwoG4qH26AmM8yOFVwGvlVQHf3uegou2227OTZ3g1D0Z1yMSGD aNROTD/1tPSgNqwZt6dEA8jnYppm0dLKdV0EGX/O8x5BzWa4+Z0MonR+PKorCYtshH7G /OdzKoj815eVwM83LnoI8PcT7RzXXNZx2O9OfMcgt0h6D0Dbxn3/DVLNpGrl0Honkiwr IlpvBdxv8VqOdgc23dxT7bj1mW+yhpnJCGala7TPZMFz4RqBjS2LJ+/hJYbqoZ9fWEqC HrOJQLl3GsObU4ldcupRC+xbUz6HqvQr2OxVoZCnmqYNIo7a+ZxYkerwNIAzFLK99HkB evjg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=vqzZUqff18NcNFh3+wi3bAvTQXk8vwMBp39XFhprHUU=; b=ratSEZMWyQ++Sn9yiLR/7irisuTbFV0c8gZ4h/ut2/IJMihCnHmqLbM9uTRtUQzfvQ uTwGnv7/uyQN1usaCwdr1HAcPdchbRyTUPlMMqrtnmZFXjU5gg5rL7BLCs+T0lnoAQz0 Hy0KkfRl2/i5CV7k9JYdKZJDgYMtsHfmT6KFuPJVst+jx58NBB5PVKalXF4QbQIoBKue 0QaS0oLJbgUZU/oqXwINDnIDXAi62H9Zm5GAOmPg21QXiHccFzKpSpPgVNeSeIJ15Yg4 c+DA0wXtDDGPbtgtKf0XxnmOKE7AoeIKQNgXuKYifbqTCx6t+Whf1P6n5cHvsmTIMeWT rOUw== X-Gm-Message-State: AHPjjUhz7Az5C0yC0dtRKuXW6TgIuzchrMkFp4stREcqVmtxVnVPkPUu 4xg7QG44il4nSSVF 
X-Google-Smtp-Source: AOwi7QDRLfBsLgFb5fyYBvqfT8Q5zUdeU0yDLJCc/yUaLue9fjLsX5HUxrHJif6LtRJ4//1btcXEmQ== X-Received: by 10.99.180.7 with SMTP id s7mr336148pgf.171.1505781584390; Mon, 18 Sep 2017 17:39:44 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. [73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.42 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:43 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 05/14] gtp: Remove special mtu handling Date: Mon, 18 Sep 2017 17:38:55 -0700 Message-Id: <20170919003904.5124-6-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Removes MTU handling in gtp_build_skb_ip4. This is non standard relative to how other tunneling protocols handle MTU. The model espoused is that the inner interface should set it's MTU to be less than the expected path MTU on the overlay network. Path MTU discovery is not typically used for modifying tunnel MTUs. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 1de2ea6217ea..f2089fa4f004 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -466,8 +466,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, struct iphdr *iph; struct sock *sk; __be32 saddr; - __be16 df; - int mtu; /* Read the IP destination address and resolve the PDP context. * Prepend PDP header with TEI/TID from PDP ctx. 
@@ -510,34 +508,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, skb_dst_drop(skb); - /* This is similar to tnl_update_pmtu(). */ - df = iph->frag_off; - if (df) { - mtu = dst_mtu(&rt->dst) - dev->hard_header_len - - sizeof(struct iphdr) - sizeof(struct udphdr); - switch (pctx->gtp_version) { - case GTP_V0: - mtu -= sizeof(struct gtp0_header); - break; - case GTP_V1: - mtu -= sizeof(struct gtp1_header); - break; - } - } else { - mtu = dst_mtu(&rt->dst); - } - - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); - - if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && - mtu < ntohs(iph->tot_len)) { - netdev_dbg(dev, "packet too big, fragmentation needed\n"); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(mtu)); - goto err_rt; - } - gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev); gtp_push_header(skb, pktinfo); From patchwork Tue Sep 19 00:38:56 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815227 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="mAxQJFpE"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3v04b30z9s5L for ; Tue, 19 Sep 2017 10:40:28 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751458AbdISAjv (ORCPT ); Mon, 18 Sep 2017 20:39:51 -0400 Received: from mail-pf0-f182.google.com 
([209.85.192.182]:54209 "EHLO mail-pf0-f182.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751227AbdISAjr (ORCPT ); Mon, 18 Sep 2017 20:39:47 -0400 Received: by mail-pf0-f182.google.com with SMTP id x78so1102918pff.10 for ; Mon, 18 Sep 2017 17:39:47 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=T62aXLJJ9uOagMMauVYCS+CpAF3HwlzU5ay0IH874Bg=; b=mAxQJFpErHfzE2bvsC50YZCK0qxroTlN2FlCX+4L6UiOBBgYS8M6CtJGkv7n+K3tBu ZdglR5+dXa+PWwOc7M5FWXCWaDxZJzJWdVemC4G+B1sVjJhWo0Jo2KbHVMvO6l+coYTe GnD5h4bmb/3oIeBL/BGEJI3M1V+oeJWz1yKWz5mWgkSluiWcUPOgitmD/7KJY6Q7rF5y GYKq1BHIWYf71NbtfTdsB25ppwa/P/KNnVmBJAKPjXQSKJBzrLzwUQ2vxkmeLYKhGILN Sl1kA5d48ffGibhIMoXHJMQ6iy2zgGvEEkPheVSW5aq3xK2fZYOK7lgG4BtWQgsQgFiS WVUA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=T62aXLJJ9uOagMMauVYCS+CpAF3HwlzU5ay0IH874Bg=; b=COE7Y34URzbdJvJnoUhpZ/YwCmerZOSAdpHFl62ZmBNzo1ohwK4+YBekRH2lHgs7YE uelPdZpXKp76wn1XCcp76nlTKji21J4j8O8KM1KFudN6QI3Fej/EI6SXlggijiCGEkoe ENYjyx8RuCrSEVEIgwBs/VBiK6fYC5HBWg9ZqoTqhONUIo74DmxEVbtS5YHQ3RzzofMx FcUgP4KfaAs+kRd0TnCYPdIgFa/Y5dNpTFlzTHz4qh30j/D5fx4wMuFxqgY9paxE/SL0 fkMNgz2PNfXk+mz5r8BH2DSAKhVrrQfn22VJ1ITutoLyYZw7XMSv80HSDPQ7rlaLtLAK rcwg== X-Gm-Message-State: AHPjjUha7DSMXhLBVwBbyX4LSyp4vw8Y6UhuxYdURq0/8V24YusHeeFb yvdm/sCDv6xCyySoawM= X-Google-Smtp-Source: AOwi7QB2f89t21W62dw2Io3+xHaK1XGnfAfiRd38M/8RNqYfQNzRZOJFfgE/J1nSdIetBrJNRiYStQ== X-Received: by 10.98.25.71 with SMTP id 68mr328871pfz.65.1505781586677; Mon, 18 Sep 2017 17:39:46 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.45 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:45 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 06/14] gtp: Eliminate pktinfo and add port configuration Date: Mon, 18 Sep 2017 17:38:56 -0700 Message-Id: <20170919003904.5124-7-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org The gtp pktinfo structure is unnecessary and needs a lot of code to manage it. Remove it. Also, add per pdp port configuration for transmit. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 167 ++++++++++++++++++++--------------------------- include/uapi/linux/gtp.h | 1 + 2 files changed, 71 insertions(+), 97 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index f2089fa4f004..a928279c382c 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -53,6 +53,7 @@ struct pdp_ctx { } v1; } u; u8 gtp_version; + __be16 gtp_port; u16 af; struct in_addr ms_addr_ip4; @@ -418,149 +419,112 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) */ } -struct gtp_pktinfo { - struct sock *sk; - struct iphdr *iph; - struct flowi4 fl4; - struct rtable *rt; - struct pdp_ctx *pctx; - struct net_device *dev; - __be16 gtph_port; -}; - -static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo) +static void gtp_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { - switch (pktinfo->pctx->gtp_version) { + switch (pctx->gtp_version) { case GTP_V0: - pktinfo->gtph_port = htons(GTP0_PORT); - gtp0_push_header(skb, pktinfo->pctx); + gtp0_push_header(skb, pctx); break; case GTP_V1: 
- pktinfo->gtph_port = htons(GTP1U_PORT); - gtp1_push_header(skb, pktinfo->pctx); + gtp1_push_header(skb, pctx); break; } } -static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, - struct sock *sk, struct iphdr *iph, - struct pdp_ctx *pctx, struct rtable *rt, - struct flowi4 *fl4, - struct net_device *dev) -{ - pktinfo->sk = sk; - pktinfo->iph = iph; - pktinfo->pctx = pctx; - pktinfo->rt = rt; - pktinfo->fl4 = *fl4; - pktinfo->dev = dev; -} - -static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, - struct gtp_pktinfo *pktinfo) +static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, + struct pdp_ctx *pctx) { - struct gtp_dev *gtp = netdev_priv(dev); - struct pdp_ctx *pctx; + struct sock *sk = pctx->sk; + __be32 saddr = inet_sk(sk)->inet_saddr; struct rtable *rt; - struct flowi4 fl4; - struct iphdr *iph; - struct sock *sk; - __be32 saddr; - - /* Read the IP destination address and resolve the PDP context. - * Prepend PDP header with TEI/TID from PDP ctx. - */ - iph = ip_hdr(skb); - if (gtp->role == GTP_ROLE_SGSN) - pctx = ipv4_pdp_find(gtp, iph->saddr); - else - pctx = ipv4_pdp_find(gtp, iph->daddr); + int err = 0; - if (!pctx) { - netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", - &iph->daddr); - return -ENOENT; - } - netdev_dbg(dev, "found PDP context %p\n", pctx); + /* Ensure there is sufficient headroom. 
*/ + err = skb_cow_head(skb, dev->needed_headroom); + if (unlikely(err)) + goto out_err; - sk = pctx->sk; - saddr = inet_sk(sk)->inet_saddr; + skb_reset_inner_headers(skb); rt = ip_tunnel_get_route(dev, skb, sk->sk_protocol, sk->sk_bound_dev_if, RT_CONN_FLAGS(sk), pctx->peer_addr_ip4.s_addr, &saddr, - pktinfo->gtph_port, pktinfo->gtph_port, + pctx->gtp_port, pctx->gtp_port, &pctx->dst_cache, NULL); if (IS_ERR(rt)) { - if (rt == ERR_PTR(-ELOOP)) { - netdev_dbg(dev, "circular route to SSGN %pI4\n", - &pctx->peer_addr_ip4.s_addr); - dev->stats.collisions++; - goto err_rt; - } else { - netdev_dbg(dev, "no route to SSGN %pI4\n", - &pctx->peer_addr_ip4.s_addr); - dev->stats.tx_carrier_errors++; - goto err; - } + err = PTR_ERR(rt); + goto out_err; } skb_dst_drop(skb); - gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev); - gtp_push_header(skb, pktinfo); + gtp_push_header(skb, pctx); + udp_tunnel_xmit_skb(rt, sk, skb, saddr, + pctx->peer_addr_ip4.s_addr, + 0, ip4_dst_hoplimit(&rt->dst), 0, + pctx->gtp_port, pctx->gtp_port, + false, false); + + netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", + &saddr, &pctx->peer_addr_ip4.s_addr); return 0; -err_rt: - ip_rt_put(rt); -err: - return -EBADMSG; + +out_err: + if (err == -ELOOP) + dev->stats.collisions++; + else + dev->stats.tx_carrier_errors++; + + return err; } static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int proto = ntohs(skb->protocol); - struct gtp_pktinfo pktinfo; + struct gtp_dev *gtp = netdev_priv(dev); + struct pdp_ctx *pctx; int err; - /* Ensure there is sufficient headroom. */ - if (skb_cow_head(skb, dev->needed_headroom)) - goto tx_err; - - skb_reset_inner_headers(skb); - /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. 
*/ rcu_read_lock(); switch (proto) { - case ETH_P_IP: - err = gtp_build_skb_ip4(skb, dev, &pktinfo); + case ETH_P_IP: { + struct iphdr *iph = ip_hdr(skb); + + if (gtp->role == GTP_ROLE_SGSN) + pctx = ipv4_pdp_find(gtp, iph->saddr); + else + pctx = ipv4_pdp_find(gtp, iph->daddr); + + if (!pctx) { + netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", + &iph->daddr); + err = -ENOENT; + goto tx_err; + } + break; + } default: err = -EOPNOTSUPP; - break; + goto tx_err; } - rcu_read_unlock(); + + netdev_dbg(dev, "found PDP context %p\n", pctx); + + err = gtp_xmit(skb, dev, pctx); if (err < 0) goto tx_err; - switch (proto) { - case ETH_P_IP: - netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", - &pktinfo.iph->saddr, &pktinfo.iph->daddr); - udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, - pktinfo.fl4.saddr, pktinfo.fl4.daddr, - pktinfo.iph->tos, - ip4_dst_hoplimit(&pktinfo.rt->dst), - 0, - pktinfo.gtph_port, pktinfo.gtph_port, - true, false); - break; - } + rcu_read_unlock(); return NETDEV_TX_OK; + tx_err: + rcu_read_unlock(); dev->stats.tx_errors++; dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -864,6 +828,8 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { + __be16 default_port = 0; + pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); pctx->af = AF_INET; pctx->peer_addr_ip4.s_addr = @@ -879,14 +845,21 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) */ pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]); pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]); + default_port = htons(GTP0_PORT); break; case GTP_V1: pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]); pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]); + default_port = htons(GTP1U_PORT); break; default: break; } + + if (info->attrs[GTPA_PORT]) + pctx->gtp_port = nla_get_u16(info->attrs[GTPA_PORT]); + else + pctx->gtp_port = default_port; } static int 
ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index 57d1edb8efd9..b2283a5c6d7f 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -27,6 +27,7 @@ enum gtp_attrs { GTPA_I_TEI, /* for GTPv1 only */ GTPA_O_TEI, /* for GTPv1 only */ GTPA_PAD, + GTPA_PORT, __GTPA_MAX, }; #define GTPA_MAX (__GTPA_MAX + 1) From patchwork Tue Sep 19 00:38:57 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815220 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="Mo3c9R3T"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tP3Gdjz9s5L for ; Tue, 19 Sep 2017 10:39:57 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751479AbdISAjx (ORCPT ); Mon, 18 Sep 2017 20:39:53 -0400 Received: from mail-pg0-f50.google.com ([74.125.83.50]:46857 "EHLO mail-pg0-f50.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751451AbdISAjt (ORCPT ); Mon, 18 Sep 2017 20:39:49 -0400 Received: by mail-pg0-f50.google.com with SMTP id i130so1093412pgc.3 for ; Mon, 18 Sep 2017 17:39:49 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=7jj9tsecwYu6QUxWcVnpBBA/85fR3gTlsBVddo9iDmU=; 
b=Mo3c9R3TmFDSYL327GzljBdmSs8lVrhywRkaDclRvdhX6saj0wFv0M1OJ3tRdb9api 3GrhwvyzUCl2FVOPyqpKNgyYwLQEdVci8LE4xg316wMqWqG3u0oIP2OkhEANlxI+9Odh 8NaSp3XwCPyD+03P1OqAdZtKhOPHYxxCDcLn83Cp59nCfBXDBejp1Gtq2EiL72TpSzwI MunTpRasvv+Wg8Am/GMG8EGurWjWJlcfrBIJl7Az2KNcbHZtxmMGrsPyTeYx/lfCLqIu ku2D9oOL2BeqMMX8UCWi3caAEF+e6ASAhCektSiFSbTuoB0zWZ9wTWuUTP9DkRrOOZVf 6mQg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=7jj9tsecwYu6QUxWcVnpBBA/85fR3gTlsBVddo9iDmU=; b=QGtgWk4Vs2ETVnIj9639gSQxUmfmQm9BAM84qAVAQJSBz4vyErDRXlouJNW6auIXE0 QobM8RxF6BvBsnoSrJ1Ix2WmSFdMpKb9h2yaVrGxUjLc6WsqERkEu7hltus8YcoaJS2Y R8aQ/hxDqPJpgSULpE1/m8epaIxkZKVKK5rWBiTirGOrWzNIngm7qZVByIwu++UQKbQf 9MV+2Jr4ROdOh/AjyMbt7td/AThouDtkQa/9Tkp9N8Dr3bPVVqKlHLp4EIJHAEYVXoZW QuR8Pod5yxqRMqWTIqnAZ+Yx6Qn9IPQbtWhD6JzrPz669bNiTJb1irTD+a4nuIrAjssK TbVg== X-Gm-Message-State: AHPjjUhTpb5O9sv6SJ9u6ciXrPNzglt/LD8tjbKnoOlWvQTNs8f6qJQx q2YfXeyGxt6QpG7P X-Google-Smtp-Source: AOwi7QDI1MWfjgGa+M1Xy0UVapOTzF+OUhWD5Msc2KxkRlRyKKs0A2s7tl8oSwnTPphY5RCegV7wDQ== X-Received: by 10.98.71.20 with SMTP id u20mr325073pfa.23.1505781589009; Mon, 18 Sep 2017 17:39:49 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.47 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:48 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 07/14] gtp: Support encapsulation of IPv6 packets Date: Mon, 18 Sep 2017 17:38:57 -0700 Message-Id: <20170919003904.5124-8-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Allow IPv6 mobile subscriber packets. This entails adding an IPv6 mobile subscriber address to pdp context and IPv6 specific variants to find pdp contexts by address. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 259 +++++++++++++++++++++++++++++++++++++---------- include/uapi/linux/gtp.h | 1 + 2 files changed, 209 insertions(+), 51 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index a928279c382c..62c0c968efa6 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -54,9 +54,13 @@ struct pdp_ctx { } u; u8 gtp_version; __be16 gtp_port; - u16 af; - struct in_addr ms_addr_ip4; + u16 ms_af; + union { + struct in_addr ms_addr_ip4; + struct in6_addr ms_addr_ip6; + }; + struct in_addr peer_addr_ip4; struct sock *sk; @@ -80,7 +84,9 @@ struct gtp_dev { unsigned int role; unsigned int hash_size; struct hlist_head *tid_hash; - struct hlist_head *addr_hash; + + struct hlist_head *addr4_hash; + struct hlist_head *addr6_hash; struct gro_cells gro_cells; }; @@ -98,6 +104,7 @@ static void pdp_context_delete(struct pdp_ctx *pctx); static inline u32 gtp0_hashfn(u64 tid) { u32 *tid32 = (u32 *) &tid; + return jhash_2words(tid32[0], tid32[1], gtp_h_initval); } @@ -111,6 +118,11 @@ static inline u32 ipv4_hashfn(__be32 ip) 
return jhash_1word((__force u32)ip, gtp_h_initval); } +static inline u32 ipv6_hashfn(const struct in6_addr *a) +{ + return __ipv6_addr_jhash(a, gtp_h_initval); +} + /* Resolve a PDP context structure based on the 64bit TID. */ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) { @@ -149,10 +161,10 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr) struct hlist_head *head; struct pdp_ctx *pdp; - head = >p->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; + head = >p->addr4_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_addr) { - if (pdp->af == AF_INET && + if (pdp->ms_af == AF_INET && pdp->ms_addr_ip4.s_addr == ms_addr) return pdp; } @@ -176,32 +188,95 @@ static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, return iph->saddr == pctx->ms_addr_ip4.s_addr; } +/* Resolve a PDP context based on IPv6 address of MS. */ +static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp, + const struct in6_addr *ms_addr) +{ + struct hlist_head *head; + struct pdp_ctx *pdp; + + head = >p->addr6_hash[ipv6_hashfn(ms_addr) % gtp->hash_size]; + + hlist_for_each_entry_rcu(pdp, head, hlist_addr) { + if (pdp->ms_af == AF_INET6 && + ipv6_addr_equal(&pdp->ms_addr_ip6, ms_addr)) + return pdp; + } + + return NULL; +} + +static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx, + unsigned int hdrlen, unsigned int role) +{ + struct ipv6hdr *ipv6h; + + if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr))) + return false; + + ipv6h = (struct ipv6hdr *)(skb->data + hdrlen); + + if (role == GTP_ROLE_SGSN) + return ipv6_addr_equal(&ipv6h->daddr, &pctx->ms_addr_ip6); + else + return ipv6_addr_equal(&ipv6h->saddr, &pctx->ms_addr_ip6); +} + /* Check if the inner IP address in this packet is assigned to any * existing mobile subscriber. 
*/ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, unsigned int hdrlen, unsigned int role) { - switch (ntohs(skb->protocol)) { - case ETH_P_IP: + struct iphdr *iph; + + /* Minimally there needs to be an IPv4 header */ + if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) + return false; + + iph = (struct iphdr *)(skb->data + hdrlen); + + switch (iph->version) { + case 4: return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); + case 6: + return gtp_check_ms_ipv6(skb, pctx, hdrlen, role); } + return false; } +static u16 ipver_to_eth(struct iphdr *iph) +{ + switch (iph->version) { + case 4: + return htons(ETH_P_IP); + case 6: + return htons(ETH_P_IPV6); + default: + return 0; + } +} + static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, - unsigned int hdrlen, unsigned int role) + unsigned int hdrlen, unsigned int role) { struct pcpu_sw_netstats *stats; + u16 inner_protocol; if (!gtp_check_ms(skb, pctx, hdrlen, role)) { netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); return 1; } + inner_protocol = ipver_to_eth((struct iphdr *)(skb->data + hdrlen)); + if (!inner_protocol) + return -1; + /* Get rid of the GTP + UDP headers. 
*/ - if (iptunnel_pull_header(skb, hdrlen, skb->protocol, - !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) + if (iptunnel_pull_header(skb, hdrlen, inner_protocol, + !net_eq(sock_net(pctx->sk), + dev_net(pctx->dev)))) return -1; netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); @@ -239,7 +314,8 @@ static int gtp0_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!gtp) goto pass; - if (!pskb_may_pull(skb, hdrlen)) + /* Pull through IP header since gtp_rx looks at IP version */ + if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) goto drop; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); @@ -285,7 +361,8 @@ static int gtp1u_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!gtp) goto pass; - if (!pskb_may_pull(skb, hdrlen)) + /* Pull through IP header since gtp_rx looks at IP version */ + if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) goto drop; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); @@ -307,8 +384,10 @@ static int gtp1u_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (gtp1->flags & GTP1_F_MASK) hdrlen += 4; - /* Make sure the header is larger enough, including extensions. */ - if (!pskb_may_pull(skb, hdrlen)) + /* Make sure the header is larger enough, including extensions and + * also an IP header since gtp_rx looks at IP version + */ + if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) goto drop; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); @@ -389,7 +468,8 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) gtp0->flags = 0x1e; /* v0, GTP-non-prime. 
*/ gtp0->type = GTP_TPDU; gtp0->length = htons(payload_len); - gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); + gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % + 0xffff); gtp0->flow = htons(pctx->u.v0.flow); gtp0->number = 0xff; gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; @@ -507,6 +587,23 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) break; } + case ETH_P_IPV6: { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + if (gtp->role == GTP_ROLE_SGSN) + pctx = ipv6_pdp_find(gtp, &ipv6h->saddr); + else + pctx = ipv6_pdp_find(gtp, &ipv6h->daddr); + + if (!pctx) { + netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n", + &ipv6h->daddr); + err = -ENOENT; + goto tx_err; + } + + break; + } default: err = -EOPNOTSUPP; goto tx_err; @@ -674,23 +771,32 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) { int i; - gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL); - if (gtp->addr_hash == NULL) - return -ENOMEM; + gtp->addr4_hash = kmalloc_array(hsize, sizeof(*gtp->addr4_hash), + GFP_KERNEL); + if (!gtp->addr4_hash) + goto err; + + gtp->addr6_hash = kmalloc_array(hsize, sizeof(*gtp->addr6_hash), + GFP_KERNEL); + if (!gtp->addr6_hash) + goto err; - gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL); - if (gtp->tid_hash == NULL) - goto err1; + gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head), + GFP_KERNEL); + if (!gtp->tid_hash) + goto err; gtp->hash_size = hsize; for (i = 0; i < hsize; i++) { - INIT_HLIST_HEAD(>p->addr_hash[i]); + INIT_HLIST_HEAD(>p->addr4_hash[i]); + INIT_HLIST_HEAD(>p->addr6_hash[i]); INIT_HLIST_HEAD(>p->tid_hash[i]); } return 0; -err1: - kfree(gtp->addr_hash); +err: + kfree(gtp->addr4_hash); + kfree(gtp->addr6_hash); return -ENOMEM; } @@ -704,7 +810,8 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) pdp_context_delete(pctx); synchronize_rcu(); - kfree(gtp->addr_hash); + kfree(gtp->addr4_hash); + kfree(gtp->addr6_hash); 
kfree(gtp->tid_hash); } @@ -826,16 +933,13 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) return gtp; } -static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +static void pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { __be16 default_port = 0; pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); - pctx->af = AF_INET; pctx->peer_addr_ip4.s_addr = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); - pctx->ms_addr_ip4.s_addr = - nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); switch (pctx->gtp_version) { case GTP_V0: @@ -862,33 +966,46 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) pctx->gtp_port = default_port; } -static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) +static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { struct net_device *dev = gtp->dev; + struct hlist_head *addr_list; + struct pdp_ctx *pctx = NULL; u32 hash_ms, hash_tid = 0; - struct pdp_ctx *pctx; - bool found = false; - __be32 ms_addr; + struct in6_addr ms6_addr; + __be32 ms_addr = 0; + int ms_af; int err; - ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); - hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + /* Caller ensures we have either v4 or v6 mobile subscriber address */ + if (info->attrs[GTPA_MS_ADDRESS]) { + /* IPv4 mobile subscriber */ - hlist_for_each_entry_rcu(pctx, >p->addr_hash[hash_ms], hlist_addr) { - if (pctx->ms_addr_ip4.s_addr == ms_addr) { - found = true; - break; - } + ms_addr = nla_get_in_addr(info->attrs[GTPA_MS_ADDRESS]); + hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + addr_list = >p->addr4_hash[hash_ms]; + ms_af = AF_INET; + + pctx = ipv4_pdp_find(gtp, ms_addr); + } else { + /* IPv6 mobile subscriber */ + + ms6_addr = nla_get_in6_addr(info->attrs[GTPA_MS6_ADDRESS]); + hash_ms = ipv6_hashfn(&ms6_addr) % gtp->hash_size; + addr_list = >p->addr6_hash[hash_ms]; + ms_af = AF_INET6; + + pctx = ipv6_pdp_find(gtp, 
&ms6_addr); } - if (found) { + if (pctx) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - ipv4_pdp_fill(pctx, info); + pdp_fill(pctx, info); if (pctx->gtp_version == GTP_V0) netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", @@ -914,7 +1031,18 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, sock_hold(sk); pctx->sk = sk; pctx->dev = gtp->dev; - ipv4_pdp_fill(pctx, info); + pctx->ms_af = ms_af; + + switch (ms_af) { + case AF_INET: + pctx->ms_addr_ip4.s_addr = ms_addr; + break; + case AF_INET6: + pctx->ms_addr_ip6 = ms6_addr; + break; + } + + pdp_fill(pctx, info); atomic_set(&pctx->tx_seq, 0); switch (pctx->gtp_version) { @@ -931,7 +1059,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, break; } - hlist_add_head_rcu(&pctx->hlist_addr, >p->addr_hash[hash_ms]); + hlist_add_head_rcu(&pctx->hlist_addr, addr_list); hlist_add_head_rcu(&pctx->hlist_tid, >p->tid_hash[hash_tid]); switch (pctx->gtp_version) { @@ -973,11 +1101,17 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) int err; if (!info->attrs[GTPA_VERSION] || - !info->attrs[GTPA_LINK] || - !info->attrs[GTPA_PEER_ADDRESS] || - !info->attrs[GTPA_MS_ADDRESS]) + !info->attrs[GTPA_LINK] || + !info->attrs[GTPA_PEER_ADDRESS]) return -EINVAL; + if (!(!!info->attrs[GTPA_MS_ADDRESS] ^ + !!info->attrs[GTPA_MS6_ADDRESS])) { + /* Either v4 or v6 mobile subscriber address must be set */ + + return -EINVAL; + } + version = nla_get_u32(info->attrs[GTPA_VERSION]); switch (version) { @@ -1016,7 +1150,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } - err = ipv4_pdp_add(gtp, sk, info); + err = gtp_pdp_add(gtp, sk, info); out_unlock: rcu_read_unlock(); @@ -1036,6 +1170,11 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net, __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]); return ipv4_pdp_find(gtp, ip); + } else if 
(nla[GTPA_MS6_ADDRESS]) { + struct in6_addr ip6 = + nla_get_in6_addr(nla[GTPA_MS6_ADDRESS]); + + return ipv6_pdp_find(gtp, &ip6); } else if (nla[GTPA_VERSION]) { u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]); @@ -1106,10 +1245,26 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || - nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || - nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) + nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr)) goto nla_put_failure; + switch (pctx->ms_af) { + case AF_INET: + if (nla_put_be32(skb, GTPA_MS_ADDRESS, + pctx->ms_addr_ip4.s_addr)) + goto nla_put_failure; + + break; + case AF_INET6: + if (nla_put_in6_addr(skb, GTPA_MS6_ADDRESS, + &pctx->ms_addr_ip6)) + goto nla_put_failure; + + break; + default: + goto nla_put_failure; + } + switch (pctx->gtp_version) { case GTP_V0: if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || @@ -1219,6 +1374,8 @@ static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_TID] = { .type = NLA_U64, }, [GTPA_PEER_ADDRESS] = { .type = NLA_U32, }, [GTPA_MS_ADDRESS] = { .type = NLA_U32, }, + [GTPA_MS6_ADDRESS] = { .len = FIELD_SIZEOF(struct ipv6hdr, + daddr) }, [GTPA_FLOW] = { .type = NLA_U16, }, [GTPA_NET_NS_FD] = { .type = NLA_U32, }, [GTPA_I_TEI] = { .type = NLA_U32, }, diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index b2283a5c6d7f..ae4e632c0360 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -28,6 +28,7 @@ enum gtp_attrs { GTPA_O_TEI, /* for GTPv1 only */ GTPA_PAD, GTPA_PORT, + GTPA_MS6_ADDRESS, __GTPA_MAX, }; #define GTPA_MAX (__GTPA_MAX + 1) From patchwork Tue Sep 19 00:38:58 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815228 X-Patchwork-Delegate: davem@davemloft.net Return-Path: 
X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="eE2Q3JDP"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3v14H84z9s78 for ; Tue, 19 Sep 2017 10:40:29 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751584AbdISAk2 (ORCPT ); Mon, 18 Sep 2017 20:40:28 -0400 Received: from mail-pg0-f44.google.com ([74.125.83.44]:50966 "EHLO mail-pg0-f44.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751462AbdISAjv (ORCPT ); Mon, 18 Sep 2017 20:39:51 -0400 Received: by mail-pg0-f44.google.com with SMTP id p5so1089160pgn.7 for ; Mon, 18 Sep 2017 17:39:51 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=U/2Id3hY7HV6sGPlDyytYA6bhvTRn/VUCGO5tu8eL7U=; b=eE2Q3JDP354zV22ZvA/w7iag7OGXwpNVsgdz1J7Kfh9X/YndUyEYIr0pq1K7mNfOJT Xx+Ev5QCEzYSuiTuOEXSITxAK/psaZtqOVLJM8o3zS/6+iM6sWngp6T8Uwk/eMGxgMbZ 3mjMXkIr7BtS4wR+ImdD+T+4FlAFbjmBTrKAJp63cwN8JNV8wi35qsEsobHIRmPk7z7N 3XspzYk6yThmthvsAz5GJld2v6zFBlks+LX7N0zkgc2tKLtlqbsXWMHunKcFXKlVBR1w Not+NGdeCpbUI0R9CSTB0qMWKC7Ng7fAQ5CgtojTeHLlPsrWUMDvb6zn4q/4sVPl0GzB Drbw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=U/2Id3hY7HV6sGPlDyytYA6bhvTRn/VUCGO5tu8eL7U=; b=jp1AQaG3K6/NHdSrlHhbEN2qFQcdCDRTVZBILN+cV7/oPO1YOqRmM4nZRMeRSN7xVp 
jLlscN+uuKP5PbHWUMvEM+kaOZELdHjgw8CfFBz1ujB7AVDUMbbgjtf/B5lSuIdHJzcW hViTaAefJTSC8jNClUi0JvSqSHrV1iltTJH1WfvcAXD0HRRdWp0Hh7dejLfE9y1TeYP0 yZpesNcV1E60zCKFp3Ac4Pfn8kdHn8asmIbvJ87Tn57L9ZxNBhDwKd9WfhDv7KzChjvm RuPbXO8gySBvYPIM6iUY6339X3Rgmzbha51EepKa6QrxXTSPkapjyFrQWN/FHXD5YajT BEng== X-Gm-Message-State: AHPjjUiV5McLoX9JTVowdFgemWrV8a/k3hzg4naN3hAY+1jrV50fe5LY Qgf88osu0Tyd9SI9 X-Google-Smtp-Source: AOwi7QDtEXPdmXyc61L2RUPMwCCHbLMSmo0aVmpeEWo/qXo35UIxAPOIGXvQdqf2lYftRvpITe1mSA== X-Received: by 10.84.244.73 with SMTP id e9mr371720plt.112.1505781591318; Mon, 18 Sep 2017 17:39:51 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. [73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.49 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:50 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 08/14] gtp: Support encpasulating over IPv6 Date: Mon, 18 Sep 2017 17:38:58 -0700 Message-Id: <20170919003904.5124-9-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Allow peers to be specified by IPv6 addresses. 
Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 198 +++++++++++++++++++++++++++++++++---------- include/uapi/linux/gtp.h | 1 + include/uapi/linux/if_link.h | 3 + 3 files changed, 158 insertions(+), 44 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 62c0c968efa6..121b41e7a901 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -61,7 +62,11 @@ struct pdp_ctx { struct in6_addr ms_addr_ip6; }; - struct in_addr peer_addr_ip4; + u16 peer_af; + union { + struct in_addr peer_addr_ip4; + struct in6_addr peer_addr_ip6; + }; struct sock *sk; struct net_device *dev; @@ -76,6 +81,8 @@ struct pdp_ctx { struct gtp_dev { struct list_head list; + unsigned int is_ipv6:1; + struct sock *sk0; struct sock *sk1u; @@ -515,8 +522,6 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, struct pdp_ctx *pctx) { struct sock *sk = pctx->sk; - __be32 saddr = inet_sk(sk)->inet_saddr; - struct rtable *rt; int err = 0; /* Ensure there is sufficient headroom. 
*/ @@ -526,28 +531,63 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, skb_reset_inner_headers(skb); - rt = ip_tunnel_get_route(dev, skb, sk->sk_protocol, - sk->sk_bound_dev_if, RT_CONN_FLAGS(sk), - pctx->peer_addr_ip4.s_addr, &saddr, - pctx->gtp_port, pctx->gtp_port, - &pctx->dst_cache, NULL); + if (pctx->peer_af == AF_INET) { + __be32 saddr = inet_sk(sk)->inet_saddr; + struct rtable *rt; - if (IS_ERR(rt)) { - err = PTR_ERR(rt); - goto out_err; - } + rt = ip_tunnel_get_route(dev, skb, sk->sk_protocol, + sk->sk_bound_dev_if, RT_CONN_FLAGS(sk), + pctx->peer_addr_ip4.s_addr, &saddr, + pctx->gtp_port, pctx->gtp_port, + &pctx->dst_cache, NULL); + + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + goto out_err; + } + + skb_dst_drop(skb); + + gtp_push_header(skb, pctx); + udp_tunnel_xmit_skb(rt, sk, skb, saddr, + pctx->peer_addr_ip4.s_addr, + 0, ip4_dst_hoplimit(&rt->dst), 0, + pctx->gtp_port, pctx->gtp_port, + false, false); - skb_dst_drop(skb); + netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", + &saddr, &pctx->peer_addr_ip4.s_addr); - gtp_push_header(skb, pctx); - udp_tunnel_xmit_skb(rt, sk, skb, saddr, - pctx->peer_addr_ip4.s_addr, - 0, ip4_dst_hoplimit(&rt->dst), 0, - pctx->gtp_port, pctx->gtp_port, - false, false); +#if IS_ENABLED(CONFIG_IPV6) + } else if (pctx->peer_af == AF_INET6) { + struct in6_addr saddr = inet6_sk(sk)->saddr; + struct dst_entry *dst; - netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", - &saddr, &pctx->peer_addr_ip4.s_addr); + dst = ip6_tnl_get_route(dev, skb, sk, sk->sk_protocol, + sk->sk_bound_dev_if, 0, + 0, &pctx->peer_addr_ip6, &saddr, + pctx->gtp_port, pctx->gtp_port, + &pctx->dst_cache, NULL); + + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out_err; + } + + skb_dst_drop(skb); + + gtp_push_header(skb, pctx); + udp_tunnel6_xmit_skb(dst, sk, skb, dev, + &saddr, &pctx->peer_addr_ip6, + 0, ip6_dst_hoplimit(dst), 0, + pctx->gtp_port, pctx->gtp_port, + true); + + netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n", + &saddr, 
&pctx->peer_addr_ip6); + +#endif + } return 0; @@ -652,7 +692,8 @@ static void gtp_link_setup(struct net_device *dev) /* Assume largest header, ie. GTPv0. */ dev->needed_headroom = LL_MAX_HEADER + - sizeof(struct iphdr) + + max_t(int, sizeof(struct iphdr), + sizeof(struct ipv6hdr)) + sizeof(struct udphdr) + sizeof(struct gtp0_header); @@ -661,12 +702,15 @@ static void gtp_link_setup(struct net_device *dev) static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); static void gtp_hashtable_free(struct gtp_dev *gtp); -static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); +static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[], + bool is_ipv6); static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + unsigned int role = GTP_ROLE_GGSN; + bool is_ipv6 = false; struct gtp_dev *gtp; struct gtp_net *gn; int hashsize, err; @@ -674,9 +718,30 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) return -EINVAL; + if (data[IFLA_GTP_ROLE]) { + role = nla_get_u32(data[IFLA_GTP_ROLE]); + if (role > GTP_ROLE_SGSN) + return -EINVAL; + } + + if (data[IFLA_GTP_AF]) { + u16 af = nla_get_u16(data[IFLA_GTP_AF]); + + switch (af) { + case AF_INET: + is_ipv6 = false; + break; + case AF_INET6: + is_ipv6 = true; + break; + default: + return -EINVAL; + } + } + gtp = netdev_priv(dev); - err = gtp_encap_enable(gtp, data); + err = gtp_encap_enable(gtp, data, is_ipv6); if (err < 0) return err; @@ -695,6 +760,9 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, goto out_hashtable; } + gtp->role = role; + gtp->is_ipv6 = is_ipv6; + gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(>p->list, &gn->gtp_dev_list); @@ -816,7 +884,8 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) } static struct sock *gtp_encap_enable_socket(int fd, int type, - struct gtp_dev *gtp) + struct 
gtp_dev *gtp, + bool is_ipv6) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct socket *sock; @@ -837,6 +906,12 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, goto out_sock; } + if (sock->sk->sk_family != (is_ipv6 ? AF_INET6 : AF_INET)) { + pr_debug("socket fd=%d not right family\n", fd); + sk = ERR_PTR(-EINVAL); + goto out_sock; + } + if (rcu_dereference_sk_user_data(sock->sk)) { sk = ERR_PTR(-EBUSY); goto out_sock; @@ -869,16 +944,16 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, return sk; } -static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) +static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[], + bool is_ipv6) { - struct sock *sk1u = NULL; - struct sock *sk0 = NULL; - unsigned int role = GTP_ROLE_GGSN; + struct sock *sk0 = NULL, *sk1u = NULL; if (data[IFLA_GTP_FD0]) { u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); - sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp); + sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp, + is_ipv6); if (IS_ERR(sk0)) return PTR_ERR(sk0); } @@ -886,7 +961,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) if (data[IFLA_GTP_FD1]) { u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); - sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp); + sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp, + is_ipv6); if (IS_ERR(sk1u)) { if (sk0) gtp_encap_disable_sock(sk0); @@ -894,15 +970,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) } } - if (data[IFLA_GTP_ROLE]) { - role = nla_get_u32(data[IFLA_GTP_ROLE]); - if (role > GTP_ROLE_SGSN) - return -EINVAL; - } - gtp->sk0 = sk0; gtp->sk1u = sk1u; - gtp->role = role; return 0; } @@ -938,8 +1007,16 @@ static void pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) __be16 default_port = 0; pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); - pctx->peer_addr_ip4.s_addr = - nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); + + if 
(info->attrs[GTPA_PEER_ADDRESS]) { + pctx->peer_af = AF_INET; + pctx->peer_addr_ip4.s_addr = + nla_get_in_addr(info->attrs[GTPA_PEER_ADDRESS]); + } else if (info->attrs[GTPA_PEER6_ADDRESS]) { + pctx->peer_af = AF_INET6; + pctx->peer_addr_ip6 = nla_get_in6_addr( + info->attrs[GTPA_PEER6_ADDRESS]); + } switch (pctx->gtp_version) { case GTP_V0: @@ -1101,9 +1178,15 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) int err; if (!info->attrs[GTPA_VERSION] || - !info->attrs[GTPA_LINK] || - !info->attrs[GTPA_PEER_ADDRESS]) + !info->attrs[GTPA_LINK]) + return -EINVAL; + + if (!(!!info->attrs[GTPA_PEER_ADDRESS] ^ + !!info->attrs[GTPA_PEER6_ADDRESS])) { + /* Either v4 or v6 peer address must be set */ + return -EINVAL; + } if (!(!!info->attrs[GTPA_MS_ADDRESS] ^ !!info->attrs[GTPA_MS6_ADDRESS])) { @@ -1138,6 +1221,12 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } + if ((info->attrs[GTPA_PEER_ADDRESS] && gtp->is_ipv6) || + (info->attrs[GTPA_PEER6_ADDRESS] && !gtp->is_ipv6)) { + err = -EINVAL; + goto out_unlock; + } + if (version == GTP_V0) sk = gtp->sk0; else if (version == GTP_V1) @@ -1244,9 +1333,28 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, if (genlh == NULL) goto nlmsg_failure; - if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || - nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr)) + if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version)) + goto nla_put_failure; + + if (nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex)) + goto nla_put_failure; + + switch (pctx->peer_af) { + case AF_INET: + if (nla_put_be32(skb, GTPA_PEER_ADDRESS, + pctx->peer_addr_ip4.s_addr)) + goto nla_put_failure; + + break; + case AF_INET6: + if (nla_put_in6_addr(skb, GTPA_PEER6_ADDRESS, + &pctx->peer_addr_ip6)) + goto nla_put_failure; + + break; + default: goto nla_put_failure; + } switch (pctx->ms_af) { case AF_INET: @@ -1373,6 +1481,8 @@ static struct nla_policy 
gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, [GTPA_PEER_ADDRESS] = { .type = NLA_U32, }, + [GTPA_PEER6_ADDRESS] = { .len = FIELD_SIZEOF(struct ipv6hdr, + daddr) }, [GTPA_MS_ADDRESS] = { .type = NLA_U32, }, [GTPA_MS6_ADDRESS] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) }, diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index ae4e632c0360..8eec519fa754 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -29,6 +29,7 @@ enum gtp_attrs { GTPA_PAD, GTPA_PORT, GTPA_MS6_ADDRESS, + GTPA_PEER6_ADDRESS, __GTPA_MAX, }; #define GTPA_MAX (__GTPA_MAX + 1) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 8d062c58d5cb..81c26864abeb 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -552,6 +552,9 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_AF, + IFLA_GTP_PORT0, + IFLA_GTP_PORT1, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) From patchwork Tue Sep 19 00:38:59 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815226 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="JiY9biEa"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tv45y4z9s5L for ; Tue, 19 Sep 2017 10:40:23 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id 
S1751497AbdISAj5 (ORCPT ); Mon, 18 Sep 2017 20:39:57 -0400 Received: from mail-pf0-f180.google.com ([209.85.192.180]:43267 "EHLO mail-pf0-f180.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751451AbdISAjy (ORCPT ); Mon, 18 Sep 2017 20:39:54 -0400 Received: by mail-pf0-f180.google.com with SMTP id y29so1113090pff.0 for ; Mon, 18 Sep 2017 17:39:54 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=iBnLdbl8fy/qxABXYiVeXFIq+uuIK5Mwzs4+/8Gjrr8=; b=JiY9biEaT9H7ljTaW4qx2zjpuwLvZIj6dKbAoyjR6HHVI928bVHcYJ7x6q3bIAyFZJ +JylU3a6RHVTAOHcQw/fcgdTWY3jRvZiY6Dkf0vO552VLLIri0pucY7jEB47e4uCYo8l KoV/aYF1IXdrR1SiDV9k1pJNfMkzl7NRxk7yGnqdMAJRZm4aQoT4HoMaIbWViTR7z/sZ oIQZZ91GjTgwua3qOuenk+UhV8DIfBRTovSK7ci4Mx1M1dMyogNCRECyHDxVv8Lr87bT 0TJTTXeOd9oJ9xWVZhSaUEcYla3pOLaem5SVXbogDQzHJpRboV8Sjmx2BUiKb7iWd+cJ +SPA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=iBnLdbl8fy/qxABXYiVeXFIq+uuIK5Mwzs4+/8Gjrr8=; b=TkCWFE8wLwXmNDaJSdNPjC07xjhuPnQ3WtniN6Ql9pNvO0K/07bkXHibl7h/kBiKp5 +twWsaXJErLosmMPNbQ1kmO85V/IGVlEhb0MyKbgl5g0BJjvlbuer+mUBI0PjVFjV0nz JQDeCuBH3jevgnrAgf8DXv4MFuoMkU1SPr4PHR8zfx46psFElYCYcY8oqYLRUK9Zotza qwYuel5trOy8g8OU3jhWQUFx/PLc8HQOddWto1yXfVfw74Dky5XOQ/cqjbdcq9pXZ2uO SW03Vf3ET43jsSCPib6SI15eQ1SOzvisXDXjA9xmLYa+RTLlZt96hbR91KRIyT+1E4tx phtg== X-Gm-Message-State: AHPjjUjnlVDkA1KCiFvgoi9C1VOFYexIi31b8DuImQsiiiyZkExQZopz rTuTgv+X1YmrSccr X-Google-Smtp-Source: AOwi7QC0bqA+I6XjOWIZPJddvhMRboQo2SbfeLpaCpwbi+IqJw38JE4cnHtVeSlAcGg6Vd1LoJdBPA== X-Received: by 10.84.252.130 with SMTP id y2mr350269pll.68.1505781593663; Mon, 18 Sep 2017 17:39:53 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.52 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:52 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 09/14] gtp: Allow configuring GTP interface as standalone Date: Mon, 18 Sep 2017 17:38:59 -0700 Message-Id: <20170919003904.5124-10-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Add new configuration of GTP interfaces that allow specifying a port to listen on (as opposed to having to get sockets from a userspace control plane). This allows GTP interfaces to be configured and the data path tested without requiring a GTP-C daemon. 
Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 212 +++++++++++++++++++++++++++++++++++------------ include/uapi/linux/gtp.h | 5 ++ 2 files changed, 166 insertions(+), 51 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 121b41e7a901..1870469a4982 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -86,6 +86,9 @@ struct gtp_dev { struct sock *sk0; struct sock *sk1u; + struct socket *sock0; + struct socket *sock1u; + struct net_device *dev; unsigned int role; @@ -430,26 +433,33 @@ static void gtp_encap_destroy(struct sock *sk) } } -static void gtp_encap_disable_sock(struct sock *sk) +static void gtp_encap_release(struct gtp_dev *gtp) { - if (!sk) - return; + if (gtp->sk0) { + if (gtp->sock0) { + udp_tunnel_sock_release(gtp->sock0); + gtp->sock0 = NULL; + } else { + gtp_encap_destroy(gtp->sk0); + } - gtp_encap_destroy(sk); -} + gtp->sk0 = NULL; + } -static void gtp_encap_disable(struct gtp_dev *gtp) -{ - gtp_encap_disable_sock(gtp->sk0); - gtp_encap_disable_sock(gtp->sk1u); + if (gtp->sk1u) { + if (gtp->sock1u) { + udp_tunnel_sock_release(gtp->sock1u); + gtp->sock1u = NULL; + } else { + gtp_encap_destroy(gtp->sk1u); + } + + gtp->sk1u = NULL; + } } static int gtp_dev_init(struct net_device *dev) { - struct gtp_dev *gtp = netdev_priv(dev); - - gtp->dev = dev; - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; @@ -461,7 +471,8 @@ static void gtp_dev_uninit(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); - gtp_encap_disable(gtp); + gtp_encap_release(gtp); + free_percpu(dev->tstats); } @@ -676,6 +687,7 @@ static const struct net_device_ops gtp_netdev_ops = { static void gtp_link_setup(struct net_device *dev) { + struct gtp_dev *gtp = netdev_priv(dev); dev->netdev_ops = >p_netdev_ops; dev->needs_free_netdev = true; @@ -697,6 +709,8 @@ static void gtp_link_setup(struct net_device *dev) sizeof(struct udphdr) + sizeof(struct gtp0_header); + gtp->dev = dev; + 
gro_cells_init(>p->gro_cells, dev); } @@ -710,13 +724,19 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { unsigned int role = GTP_ROLE_GGSN; + bool have_fd, have_ports; bool is_ipv6 = false; struct gtp_dev *gtp; struct gtp_net *gn; int hashsize, err; - if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) + have_fd = !!data[IFLA_GTP_FD0] || !!data[IFLA_GTP_FD1]; + have_ports = !!data[IFLA_GTP_PORT0] || !!data[IFLA_GTP_PORT1]; + + if (!(have_fd ^ have_ports)) { + /* Either got fd(s) or port(s) */ return -EINVAL; + } if (data[IFLA_GTP_ROLE]) { role = nla_get_u32(data[IFLA_GTP_ROLE]); @@ -773,7 +793,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, out_hashtable: gtp_hashtable_free(gtp); out_encap: - gtp_encap_disable(gtp); + gtp_encap_release(gtp); return err; } @@ -782,7 +802,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head) struct gtp_dev *gtp = netdev_priv(dev); gro_cells_destroy(>p->gro_cells); - gtp_encap_disable(gtp); + gtp_encap_release(gtp); gtp_hashtable_free(gtp); list_del_rcu(>p->list); unregister_netdevice_queue(dev, head); @@ -793,6 +813,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_FD1] = { .type = NLA_U32 }, [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, [IFLA_GTP_ROLE] = { .type = NLA_U32 }, + [IFLA_GTP_PORT0] = { .type = NLA_U16 }, + [IFLA_GTP_PORT1] = { .type = NLA_U16 }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], @@ -883,11 +905,35 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) kfree(gtp->tid_hash); } -static struct sock *gtp_encap_enable_socket(int fd, int type, - struct gtp_dev *gtp, - bool is_ipv6) +static int gtp_encap_enable_sock(struct socket *sock, int type, + struct gtp_dev *gtp) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; + + switch (type) { + case UDP_ENCAP_GTP0: + tuncfg.encap_rcv = gtp0_udp_encap_recv; + break; + case UDP_ENCAP_GTP1U: + tuncfg.encap_rcv = 
gtp1u_udp_encap_recv; + break; + default: + pr_debug("Unknown encap type %u\n", type); + return -EINVAL; + } + + tuncfg.sk_user_data = gtp; + tuncfg.encap_type = type; + tuncfg.encap_destroy = gtp_encap_destroy; + + setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); + + return 0; +} + +static struct sock *gtp_encap_enable_fd(int fd, int type, struct gtp_dev *gtp, + bool is_ipv6) +{ struct socket *sock; struct sock *sk; int err; @@ -920,60 +966,124 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, sk = sock->sk; sock_hold(sk); - switch (type) { - case UDP_ENCAP_GTP0: - tuncfg.encap_rcv = gtp0_udp_encap_recv; - break; - case UDP_ENCAP_GTP1U: - tuncfg.encap_rcv = gtp1u_udp_encap_recv; - break; - default: - pr_debug("Unknown encap type %u\n", type); - sk = ERR_PTR(-EINVAL); - goto out_sock; - } - - tuncfg.sk_user_data = gtp; - tuncfg.encap_type = type; - tuncfg.encap_destroy = gtp_encap_destroy; - - setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); + err = gtp_encap_enable_sock(sock, type, gtp); + if (err < 0) + sk = ERR_PTR(err); out_sock: sockfd_put(sock); return sk; } +static struct socket *gtp_create_sock(struct net *net, bool ipv6, + __be16 port, u32 flags) +{ + struct socket *sock; + struct udp_port_cfg udp_conf; + int err; + + memset(&udp_conf, 0, sizeof(udp_conf)); + + if (ipv6) { + udp_conf.family = AF_INET6; + udp_conf.ipv6_v6only = 1; + } else { + udp_conf.family = AF_INET; + } + + udp_conf.local_udp_port = port; + + /* Open UDP socket */ + err = udp_sock_create(net, &udp_conf, &sock); + if (err) + return ERR_PTR(err); + + return sock; +} + static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[], bool is_ipv6) { + int err; + + struct socket *sock0 = NULL, *sock1u = NULL; struct sock *sk0 = NULL, *sk1u = NULL; if (data[IFLA_GTP_FD0]) { u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); - sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp, - is_ipv6); - if (IS_ERR(sk0)) - return PTR_ERR(sk0); + sk0 = 
gtp_encap_enable_fd(fd0, UDP_ENCAP_GTP0, gtp, is_ipv6); + if (IS_ERR(sk0)) { + err = PTR_ERR(sk0); + sk0 = NULL; + goto out_err; + } + } else if (data[IFLA_GTP_PORT0]) { + __be16 port = nla_get_u16(data[IFLA_GTP_PORT0]); + + sock0 = gtp_create_sock(dev_net(gtp->dev), is_ipv6, port, 0); + if (IS_ERR(sock0)) { + err = PTR_ERR(sock0); + sock0 = NULL; + goto out_err; + } + + err = gtp_encap_enable_sock(sock0, UDP_ENCAP_GTP0, gtp); + if (err) + goto out_err; } if (data[IFLA_GTP_FD1]) { u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); - sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp, - is_ipv6); + sk1u = gtp_encap_enable_fd(fd1, UDP_ENCAP_GTP1U, gtp, is_ipv6); if (IS_ERR(sk1u)) { - if (sk0) - gtp_encap_disable_sock(sk0); - return PTR_ERR(sk1u); + err = PTR_ERR(sk1u); + sk1u = NULL; + goto out_err; + } + } else if (data[IFLA_GTP_PORT1]) { + __be16 port = nla_get_u16(data[IFLA_GTP_PORT1]); + + sock1u = gtp_create_sock(dev_net(gtp->dev), is_ipv6, port, 0); + if (IS_ERR(sock1u)) { + err = PTR_ERR(sock1u); + sock1u = NULL; + goto out_err; } + + err = gtp_encap_enable_sock(sock1u, UDP_ENCAP_GTP1U, gtp); + if (err) + goto out_err; + } + + if (sock0) { + gtp->sock0 = sock0; + gtp->sk0 = sock0->sk; + } else { + gtp->sk0 = sk0; } - gtp->sk0 = sk0; - gtp->sk1u = sk1u; + if (sock1u) { + gtp->sock1u = sock1u; + gtp->sk1u = sock1u->sk; + } else { + gtp->sk1u = sk1u; + } return 0; + +out_err: + if (sk0) + gtp_encap_destroy(sk0); + if (sk1u) + gtp_encap_destroy(sk1u); + if (sock0) + udp_tunnel_sock_release(sock0); + if (sock1u) + udp_tunnel_sock_release(sock1u); + + return err; } static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) @@ -1515,8 +1625,8 @@ static const struct genl_ops gtp_genl_ops[] = { }; static struct genl_family gtp_genl_family __ro_after_init = { - .name = "gtp", - .version = 0, + .name = GTP_GENL_NAME, + .version = GTP_GENL_VERSION, .hdrsize = 0, .maxattr = GTPA_MAX, .netnsok = true, diff --git a/include/uapi/linux/gtp.h 
b/include/uapi/linux/gtp.h index 8eec519fa754..0da18aa88be8 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -9,6 +9,11 @@ enum gtp_genl_cmds { GTP_CMD_MAX, }; +/* NETLINK_GENERIC related info + */ +#define GTP_GENL_NAME "gtp" +#define GTP_GENL_VERSION 0 + enum gtp_version { GTP_V0 = 0, GTP_V1, From patchwork Tue Sep 19 00:39:00 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815221 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="tdHzzkyu"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tS4pKGz9s5L for ; Tue, 19 Sep 2017 10:40:00 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751509AbdISAj6 (ORCPT ); Mon, 18 Sep 2017 20:39:58 -0400 Received: from mail-pf0-f171.google.com ([209.85.192.171]:55703 "EHLO mail-pf0-f171.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751480AbdISAj4 (ORCPT ); Mon, 18 Sep 2017 20:39:56 -0400 Received: by mail-pf0-f171.google.com with SMTP id r71so1099533pfe.12 for ; Mon, 18 Sep 2017 17:39:56 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=qUlmMIMEM/vtKZ0+B0GQrBrLIjMcKU+y5Ie/ZIjbBVE=; b=tdHzzkyuSw3gUiqwdknhgyWqemENAXQvPIZIAhTWlAMf1aH7zETnFDIUrv8zjPbs+S 
gFAIuK2NeFJtFyrdpZeYZEL3sO/IkKWaL29eH6GRdfYd4Jeo39UMO9liBQaaX8phrzkk G5RwHcqsOd/p5uRvRcWwsQjlh/eE3d+jXh28yq8VswnKVNTvmXomq7P2RqEiUhoWIX60 0HCnP3D9P9TIYNBpd7eSoPiCUyl9wXRNflwv1s/f2KynPYwn01N9vWMpTSmSiU32E3bw KICNxZUdWNpCqIG2iqJktV9gN6+UWHpCvXC5wHHt2S6ByznErZ0tiqfVVgYah1uBZHmk MjVw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=qUlmMIMEM/vtKZ0+B0GQrBrLIjMcKU+y5Ie/ZIjbBVE=; b=lctH1jXPOyhkaPPzMBpsGuPGgBaNHIIbkpEi6A1tAdkpGB1DXAzHMRyjGu0zJIQ42S vnyZfX2xg6P509croH9yGW2gCAqQLbtLUBMMMX7AuO/XJT2a+CnmUJoZqRM4eXdg+cU8 LCImfPp8zSNYIdDPsjuZod3nljaGyMeLjFDB13dzcM7Dy5u7NixH1O9hwULBLLgHU1gd zv8gF62HrPKIJEp3AUbX3POxdofKRB7A3ShN/GJJkG1UkHbC2cJzdQT/iekvnqs7fy24 8e9ZJgNK6MpJr3jNrAt4qc1paxFhojDpB2uNto4I6NGPFORrCmY71lhZzFP6wUi4b1nj dyKQ== X-Gm-Message-State: AHPjjUh19H7PtsBCZAYP+9P0ru1DtPHSiSEu7hrmYcB7FcFhqXylNvry jxYsCbvKga/rrD5D X-Google-Smtp-Source: AOwi7QC37kWjj/j88o+cFM475uMmbfrKKHACDlCRvGkFp31Bgnhe8KrX/et0/D828gc/rQQbJ8dPSQ== X-Received: by 10.84.235.72 with SMTP id g8mr350730plt.86.1505781595708; Mon, 18 Sep 2017 17:39:55 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.54 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:54 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 10/14] gtp: Add support for devnet Date: Mon, 18 Sep 2017 17:39:00 -0700 Message-Id: <20170919003904.5124-11-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Add a net field to gtp that is derived from src_net. Use net_eq to make cross net argument for transmit functions. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 1870469a4982..393f63cb2576 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -89,6 +89,7 @@ struct gtp_dev { struct socket *sock0; struct socket *sock1u; + struct net *net; struct net_device *dev; unsigned int role; @@ -271,6 +272,7 @@ static u16 ipver_to_eth(struct iphdr *iph) static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, unsigned int hdrlen, unsigned int role) { + struct gtp_dev *gtp = netdev_priv(pctx->dev); struct pcpu_sw_netstats *stats; u16 inner_protocol; @@ -285,8 +287,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, /* Get rid of the GTP + UDP headers. 
*/ if (iptunnel_pull_header(skb, hdrlen, inner_protocol, - !net_eq(sock_net(pctx->sk), - dev_net(pctx->dev)))) + !net_eq(gtp->net, dev_net(pctx->dev)))) return -1; netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); @@ -532,6 +533,8 @@ static void gtp_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, struct pdp_ctx *pctx) { + struct gtp_dev *gtp = netdev_priv(dev); + bool xnet = !net_eq(gtp->net, dev_net(gtp->dev)); struct sock *sk = pctx->sk; int err = 0; @@ -564,7 +567,7 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, pctx->peer_addr_ip4.s_addr, 0, ip4_dst_hoplimit(&rt->dst), 0, pctx->gtp_port, pctx->gtp_port, - false, false); + xnet, false); netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", &saddr, &pctx->peer_addr_ip4.s_addr); @@ -782,6 +785,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gtp->role = role; gtp->is_ipv6 = is_ipv6; + gtp->net = src_net; gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(>p->list, &gn->gtp_dev_list); From patchwork Tue Sep 19 00:39:01 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815222 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="gvbSt2QB"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tV6DP8z9s5L for ; Tue, 19 Sep 2017 10:40:02 +1000 (AEST) Received: 
(majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751519AbdISAkA (ORCPT ); Mon, 18 Sep 2017 20:40:00 -0400 Received: from mail-pg0-f51.google.com ([74.125.83.51]:50972 "EHLO mail-pg0-f51.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751499AbdISAj6 (ORCPT ); Mon, 18 Sep 2017 20:39:58 -0400 Received: by mail-pg0-f51.google.com with SMTP id p5so1089298pgn.7 for ; Mon, 18 Sep 2017 17:39:58 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=GJl+09fmAlrEwxv1vLKSfsQl/6LdaWqjqzwCT7Jc+Yo=; b=gvbSt2QBVTpUdFHkMhd1y2FCrZOKlWTetOCMUuA5S/IFK+Bpt48qy6x4cm3yh4QpRv feL4uHrcHtvW5sqI5w50SxWkzWDgIhzPLgMD3eAHfUZPMFkKVlqI24yTU4iP3C5ejzOm Vhxo/CZA7AVHX7wEzxJTrtN7jiYwj4cTaVJP9JcUlcGym0XhzUeeDIiZGDKOWiois+IQ aEYlUxDCRh94Bg65bpd7fntFOeTnMZuzVf5cJLCm/JpmD5Wbv4s9Wh3lmjrN7RZJqc6J t+HgZ7rc3V4UW52bFxnYmTW6C9aAqdh64scyPJI0YyVlmZFvJEY4dgTcFKiwfKQwc5kG MJrw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=GJl+09fmAlrEwxv1vLKSfsQl/6LdaWqjqzwCT7Jc+Yo=; b=I4/yp1K98WA7zXDhSX8dZUuZlr/gYJL9sYFXmhiNfkQFv6t+bTeMyPMLrBe2O8/lKD 9hb+prTmHhLyuBuIqhad+jYNs4ml8NNXxDCYm+gY5qZqs4qJ6UcaZVjN6lVomFocFIWi RfMYEBoQNu6/UDJq/XPVzIDsYmVUzygSW5iJMEADSpIK2RdNc0A+RgO/B47UegaIUPiJ UahFWogM7n8GLhQJ3FR6q3wUGOzLYFxFS5Mo5UIDq9uyOU4i4QMuaBUhHrSyw54QK0wE b4bPWQB9VbKy5m4eICAOF7jhforWMlIRIwC0vVUV6GPTZ7lQcubWywxe3zh0xxhRD0JM ftSQ== X-Gm-Message-State: AHPjjUj8NAsBEB9VF14fQ4u3bI+R2crzYwl2XRrE0bGkZpM935PnnqpH TIR651nzo8e3DL7n X-Google-Smtp-Source: AOwi7QCLoqP4o/U3u8sFdeXyCwl4Un5rHPFRD0+9cW2AqYFxgEejgW+ZgpA8QWynuHJwIf9kQgCkVA== X-Received: by 10.84.179.165 with SMTP id b34mr341381plc.15.1505781598136; Mon, 18 Sep 2017 17:39:58 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.56 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:57 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 11/14] net: Add a facility to support application defined GSO Date: Mon, 18 Sep 2017 17:39:01 -0700 Message-Id: <20170919003904.5124-12-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Allow applications or encapsulation protocols to register a GSO segment function to their specific protocol. To faciliate this I reserved the upper four bits in the gso_type to indicate the application specific GSO type. Zero in these bits indicates no application GSO, so there are fifteen instance that can be defined. An application registers a a gso_segment using the skb_gso_app_register this takes a struct skb_gso_app that indicates a callback function as well as a set of GSO types for which at least one must be matched before calling he segment function. GSO returns one of the application GSO types described above (not a fixed value for the applications). Subsequently, when the application sends a GSO packet the application gso_type is set in the skb gso_type along with any other types. skb_gso_app_segment is the function called from another GSO segment function to handle segmentation of the application or encapsulation protocol. This function includes check flags that provides context for the appropriate GSO instance to match. 
For instance, in order to handle a protocol encapsulated in UDP (GTP for instance) skb_gso_app_segment is call from udp_tunnel_segment and check flags would be SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_UDP_TUNNEL. Signed-off-by: Tom Herbert --- include/linux/netdevice.h | 31 +++++++++++++++++++++++++++++++ include/linux/skbuff.h | 25 +++++++++++++++++++++++++ net/core/dev.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/ip_tunnel_core.c | 6 ++++++ net/ipv4/udp_offload.c | 20 +++++++++++++++----- 5 files changed, 124 insertions(+), 5 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f535779d9dc1..f3bed4f8ba83 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3932,6 +3932,37 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features); +struct skb_gso_app { + unsigned int check_flags; + struct sk_buff *(*gso_segment)(struct sk_buff *skb, + netdev_features_t features); +}; + +extern struct skb_gso_app *skb_gso_apps[]; +int skb_gso_app_register(const struct skb_gso_app *app); +void skb_gso_app_unregister(int num, const struct skb_gso_app *app); + +/* rcu_read_lock() must be held */ +static inline struct skb_gso_app *skb_gso_app_lookup(struct sk_buff *skb, + netdev_features_t features, + unsigned int check_flags) +{ + struct skb_gso_app *app; + int type; + + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_APP_MASK)) + return false; + + type = skb_gso_app_to_index(skb_shinfo(skb)->gso_type); + + app = rcu_dereference(skb_gso_apps[type]); + if (app && app->gso_segment && + (check_flags & app->check_flags)) + return app; + + return NULL; +} + struct netdev_bonding_info { ifslave slave; ifbond master; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 72299ef00061..ea45fb93897c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -535,6 +535,9 @@ enum { SKB_FCLONE_CLONE, /* companion fclone skb (from 
fclone_cache) */ }; +#define SKB_GSO_APP_LOW_SHIFT 28 +#define SKB_GSO_APP_HIGH_SHIFT 31 + enum { SKB_GSO_TCPV4 = 1 << 0, @@ -569,8 +572,30 @@ enum { SKB_GSO_SCTP = 1 << 14, SKB_GSO_ESP = 1 << 15, + + /* UDP encapsulation specific GSO consumes bits 28 through 31 */ + + SKB_GSO_APP_LOW = 1 << SKB_GSO_APP_LOW_SHIFT, + + SKB_GSO_APP_HIGH = 1 << SKB_GSO_APP_HIGH_SHIFT, }; +#define SKB_GSO_APP_MASK ((-1U << SKB_GSO_APP_LOW_SHIFT) & \ + (-1U >> (8*sizeof(u32) - SKB_GSO_APP_HIGH_SHIFT - 1))) +#define SKB_GSO_APP_NUM (SKB_GSO_APP_MASK >> SKB_GSO_APP_LOW_SHIFT) + +static inline int skb_gso_app_to_index(unsigned int x) +{ + /* Caller should check that app bits are non-zero */ + + return ((SKB_GSO_APP_MASK & x) >> SKB_GSO_APP_LOW_SHIFT) - 1; +} + +static inline int skb_gso_app_to_gso_type(unsigned int x) +{ + return (x + 1) << SKB_GSO_APP_LOW_SHIFT; +} + #if BITS_PER_LONG > 32 #define NET_SKBUFF_DATA_USES_OFFSET 1 #endif diff --git a/net/core/dev.c b/net/core/dev.c index fb766d906148..c77fca112e67 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -156,6 +156,7 @@ static DEFINE_SPINLOCK(ptype_lock); static DEFINE_SPINLOCK(offload_lock); +static DEFINE_SPINLOCK(skb_gso_app_lock); struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; struct list_head ptype_all __read_mostly; /* Taps */ static struct list_head offload_base __read_mostly; @@ -2725,6 +2726,52 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, } EXPORT_SYMBOL(skb_mac_gso_segment); +struct skb_gso_app *skb_gso_apps[SKB_GSO_APP_NUM]; +EXPORT_SYMBOL(skb_gso_apps); + +int skb_gso_app_register(const struct skb_gso_app *app) +{ + int i, ret = 0; + + spin_lock(&skb_gso_app_lock); + + for (i = 0; i < SKB_GSO_APP_NUM; i++) { + if (!rcu_dereference_protected(skb_gso_apps[i], + lockdep_is_held(&skb_gso_app_lock))) { + /* Found an empty slot */ + rcu_assign_pointer(skb_gso_apps[i], app); + + ret = skb_gso_app_to_gso_type(i); + + break; + } + } + + spin_unlock(&skb_gso_app_lock); + + return ret; +return 0; +} 
+EXPORT_SYMBOL(skb_gso_app_register); + +void skb_gso_app_unregister(int num, const struct skb_gso_app *app) +{ + if (!num) + return; + + num = skb_gso_app_to_index(num); + + spin_lock(&skb_gso_app_lock); + + if (app == rcu_dereference_protected(skb_gso_apps[num], + lockdep_is_held(&skb_gso_app_lock))) { + /* Matched entry */ + rcu_assign_pointer(skb_gso_apps[num], NULL); + } + + spin_unlock(&skb_gso_app_lock); +} +EXPORT_SYMBOL(skb_gso_app_unregister); /* openvswitch calls this on rx path, so we need a different check. */ diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 2f39479be92f..f2fd96d55c4e 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -171,6 +171,12 @@ int iptunnel_handle_offloads(struct sk_buff *skb, err = skb_header_unclone(skb, GFP_ATOMIC); if (unlikely(err)) return err; + if (!!(gso_type_mask & SKB_GSO_APP_MASK) && + !!(skb_shinfo(skb)->gso_type & SKB_GSO_APP_MASK)) { + /* Only allow one GSO app per packet */ + return -EALREADY; + } + skb_shinfo(skb)->gso_type |= gso_type_mask; return 0; } diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 97658bfc1b58..ba58b36b35b2 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -152,19 +152,29 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, bool is_ipv6) { - __be16 protocol = skb->protocol; - const struct net_offload **offloads; - const struct net_offload *ops; - struct sk_buff *segs = ERR_PTR(-EINVAL); struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features); + const struct net_offload **offloads; + __be16 protocol = skb->protocol; + struct skb_gso_app *gso_app; + const struct net_offload *ops; + struct sk_buff *segs; + + segs = ERR_PTR(-EINVAL); rcu_read_lock(); + gso_app = skb_gso_app_lookup(skb, features, + SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_UDP_TUNNEL); + switch (skb->inner_protocol_type) { case ENCAP_TYPE_ETHER: protocol = skb->inner_protocol; - 
gso_inner_segment = skb_mac_gso_segment; + if (gso_app && gso_app->gso_segment) + gso_inner_segment = gso_app->gso_segment; + else + gso_inner_segment = skb_mac_gso_segment; break; case ENCAP_TYPE_IPPROTO: offloads = is_ipv6 ? inet6_offloads : inet_offloads; From patchwork Tue Sep 19 00:39:02 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815223 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="Ua1aM4iJ"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tY4bvxz9s5L for ; Tue, 19 Sep 2017 10:40:05 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751539AbdISAkD (ORCPT ); Mon, 18 Sep 2017 20:40:03 -0400 Received: from mail-pg0-f49.google.com ([74.125.83.49]:55317 "EHLO mail-pg0-f49.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751499AbdISAkB (ORCPT ); Mon, 18 Sep 2017 20:40:01 -0400 Received: by mail-pg0-f49.google.com with SMTP id b11so1081389pgn.12 for ; Mon, 18 Sep 2017 17:40:00 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=RR4DxypPfqZ8aeG8beor0M3BzaJnXlqfm+25oYwqfi8=; b=Ua1aM4iJ2tH+2+Nx8OL9ZJtwRvApmuVCWHC6xqK8Gk1T2A2mhGngwPchKOwj1FtFum g8Qb5OnfMbFeqm8N22plk3xGxg7rbyhU8YQdxl4c1Dx8fjHOTswoIWw8kuwBjOjob1sF 
vtfwMTHLclFs/QDnskSHxyR1jWXVbg8bLK26i0ubBNGzYk3+SG8GZBlD27C9O6ij2Ng3 R6x7SdAxc9/L7Pj7c6YjQLjJUIh7BYMCcSborRIEULO/zxyCTsRO2lErv/aV0o8zghqm +ruvHUDpT+M9lj4dETiZvzA5VZrSmAHSqCXJgdFcmbkJulnU61Pim5Fb2SdckM3TdxxF 63/g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=RR4DxypPfqZ8aeG8beor0M3BzaJnXlqfm+25oYwqfi8=; b=FvYs68ENFmUbvkGSUCou1PJ+qpy/LsxZGy7cJB3QJAUAnCFFkI2PGezSR52+oxrO/Y LGzemMfTM0tnq5jyI5DGtzCPvily4JqoMLORaUt66A4qBlOJmj2s4V+VfveHDuyk0Ufq s6Hlhys3uP51aey9mAB0mc6QRsAul30d3x28se9mNQy/1R1qCdwtHImag++xuLNEg880 SdpAiAOShG44PcSjMkdcUZaBlhU/KOXpWp9/M4NTdlZ8V7Q+4VjDnHMf19cCYxHjH4/8 EKrSFYcTQG+y+dvsnAoXN6tj73MnKRxM40a3GeGtXnW1Q0CTwjgTgDG2wQvc1bjn6i0H fo1g== X-Gm-Message-State: AHPjjUjNa0nE0G7322bXPZgQDZ/+8ewjn3NedWEd5Eg5sbHAFirg3cpf TGCr7oMZwRX7rn3U X-Google-Smtp-Source: AOwi7QDd/WVUFcfCsomvvSI/d9ZpR6dY6FlZKKnuYLBW0FFiPxfWYtkJ7JW8w/3gC2zBTE2GgbL2YA== X-Received: by 10.84.216.87 with SMTP id f23mr335308plj.307.1505781600402; Mon, 18 Sep 2017 17:40:00 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.39.58 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:39:59 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 12/14] gtp: Configuration for zero UDP checksum Date: Mon, 18 Sep 2017 17:39:02 -0700 Message-Id: <20170919003904.5124-13-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Add configuration to control use of zero checksums on transmit for both IPv4 and IPv6, and control over accepting zero IPv6 checksums on receive. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 35 +++++++++++++++++++++++++++++++++-- include/uapi/linux/if_link.h | 4 ++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 393f63cb2576..b53946f8b10b 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -75,6 +75,13 @@ struct pdp_ctx { struct rcu_head rcu_head; struct dst_cache dst_cache; + + unsigned int cfg_flags; + +#define GTP_F_UDP_ZERO_CSUM_TX 0x1 +#define GTP_F_UDP_ZERO_CSUM6_TX 0x2 +#define GTP_F_UDP_ZERO_CSUM6_RX 0x4 + }; /* One instance of the GTP device. */ @@ -536,6 +543,7 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, struct gtp_dev *gtp = netdev_priv(dev); bool xnet = !net_eq(gtp->net, dev_net(gtp->dev)); struct sock *sk = pctx->sk; + bool udp_csum; int err = 0; /* Ensure there is sufficient headroom. 
*/ @@ -563,11 +571,12 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, skb_dst_drop(skb); gtp_push_header(skb, pctx); + udp_csum = !(pctx->cfg_flags & GTP_F_UDP_ZERO_CSUM_TX); udp_tunnel_xmit_skb(rt, sk, skb, saddr, pctx->peer_addr_ip4.s_addr, 0, ip4_dst_hoplimit(&rt->dst), 0, pctx->gtp_port, pctx->gtp_port, - xnet, false); + xnet, !udp_csum); netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", &saddr, &pctx->peer_addr_ip4.s_addr); @@ -591,11 +600,12 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, skb_dst_drop(skb); gtp_push_header(skb, pctx); + udp_csum = !(pctx->cfg_flags & GTP_F_UDP_ZERO_CSUM6_TX); udp_tunnel6_xmit_skb(dst, sk, skb, dev, &saddr, &pctx->peer_addr_ip6, 0, ip6_dst_hoplimit(dst), 0, pctx->gtp_port, pctx->gtp_port, - true); + !udp_csum); netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n", &saddr, &pctx->peer_addr_ip6); @@ -728,6 +738,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, { unsigned int role = GTP_ROLE_GGSN; bool have_fd, have_ports; + unsigned int flags = 0; bool is_ipv6 = false; struct gtp_dev *gtp; struct gtp_net *gn; @@ -747,6 +758,21 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; } + if (data[IFLA_GTP_UDP_CSUM]) { + if (!nla_get_u8(data[IFLA_GTP_UDP_CSUM])) + flags |= GTP_F_UDP_ZERO_CSUM_TX; + } + + if (data[IFLA_GTP_UDP_ZERO_CSUM6_TX]) { + if (nla_get_u8(data[IFLA_GTP_UDP_ZERO_CSUM6_TX])) + flags |= GTP_F_UDP_ZERO_CSUM6_TX; + } + + if (data[IFLA_GTP_UDP_ZERO_CSUM6_RX]) { + if (nla_get_u8(data[IFLA_GTP_UDP_ZERO_CSUM6_RX])) + flags |= GTP_F_UDP_ZERO_CSUM6_RX; + } + if (data[IFLA_GTP_AF]) { u16 af = nla_get_u16(data[IFLA_GTP_AF]); @@ -819,6 +845,9 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_ROLE] = { .type = NLA_U32 }, [IFLA_GTP_PORT0] = { .type = NLA_U16 }, [IFLA_GTP_PORT1] = { .type = NLA_U16 }, + [IFLA_GTP_UDP_CSUM] = { .type = NLA_U8 }, + [IFLA_GTP_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, + 
[IFLA_GTP_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], @@ -990,6 +1019,8 @@ static struct socket *gtp_create_sock(struct net *net, bool ipv6, if (ipv6) { udp_conf.family = AF_INET6; + udp_conf.use_udp6_rx_checksums = + !(flags & GTP_F_UDP_ZERO_CSUM6_RX); udp_conf.ipv6_v6only = 1; } else { udp_conf.family = AF_INET; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 81c26864abeb..14a32d745e24 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -555,6 +555,10 @@ enum { IFLA_GTP_AF, IFLA_GTP_PORT0, IFLA_GTP_PORT1, + IFLA_GTP_UDP_CSUM, + IFLA_GTP_UDP_ZERO_CSUM6_TX, + IFLA_GTP_UDP_ZERO_CSUM6_RX, + __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) From patchwork Tue Sep 19 00:39:03 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815224 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="gL5PrtIR"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tc5Bs7z9s5L for ; Tue, 19 Sep 2017 10:40:08 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751549AbdISAkG (ORCPT ); Mon, 18 Sep 2017 20:40:06 -0400 Received: from mail-pf0-f173.google.com ([209.85.192.173]:45201 "EHLO mail-pf0-f173.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751541AbdISAkD (ORCPT ); 
Mon, 18 Sep 2017 20:40:03 -0400 Received: by mail-pf0-f173.google.com with SMTP id q76so1109936pfq.2 for ; Mon, 18 Sep 2017 17:40:03 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=TGyPfjP7tOUbqXkiuGDdw5mlZaZd0OcOu8cLIQaFrLk=; b=gL5PrtIR5YGbWZaYMIGrQNBt3Ruw1VrD7EpltZwAvBnDNL19+SarJ577Pw/rEH2wyx /QKd8Fr0GTRk8V/NaSDzprG1bHZ539yZVu2tEuEDcqri7otAtDgnvC741zNdEf6GPkCq KQZFI88Iz7edVjcoDyYWkHkLWuglMUxA3Guh4CQrFVE7a4p+pAbCN57Xd9arQ+D9G5N6 4Ito/Mlzewl2MfUN6+NuB0MiHknNzCffM+1DoLo+X06g0ec2D3MaiKZbC6tWaTG0EQcj peAfLz7j/G4504GhZXrs2noLsDHJ2k44X/o6vkE5qgZGDo+enUDDVVGr2eEJTcqYU5cX aEWA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=TGyPfjP7tOUbqXkiuGDdw5mlZaZd0OcOu8cLIQaFrLk=; b=IfKaT8mwAe7iV/m/g3MxSzzgzA0qI1oRmjvUTLBB2YGiFOvrTttRqsK8ztB0T/ZB+b 3QPN//xdRxUq6OhFCy5YiTGcCkwmphMOM/OKP5m1S6GaD4/l4e3Y27FH7xODYZ6qS4mV YwUMd2VF6rm3/VH1Q1b+FhkLrT8oRjvgGt6KwkReMSFMY9ATrlO0S7ksAof8zNC+9C/v wOr2dqBshNGgCRCtdVjV33milkzE4uhhrByAkMZeexo7bwjQFs9VBIih88c4OyBW42Kg OLph5EEr0hfEY6ng/ked9kzQkFjFfnLd/DR4ATHaTPLBmC3WVxCZghpK/ME6h5NM6GMM VXSg== X-Gm-Message-State: AHPjjUjiotDjW+clKX1d5BP5O+EeXrh/O4Dd7PShohkLkOaiCG6LZwPE z2JHKcFTZHVSFl0Ns60= X-Google-Smtp-Source: AOwi7QBJlzgGCmoJkzX2YXkhgqoYW6fwr+DsG1smaZ2tgnVTxQO2LvjhILGA09urN/0YKIPYEeShWA== X-Received: by 10.99.140.29 with SMTP id m29mr363150pgd.126.1505781602842; Mon, 18 Sep 2017 17:40:02 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.40.01 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:40:01 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 13/14] gtp: Support for GRO Date: Mon, 18 Sep 2017 17:39:03 -0700 Message-Id: <20170919003904.5124-14-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Populate GRO receive and GRO complete functions for GTP-Uv0 and v1. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 204 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index b53946f8b10b..2f9d810cf19f 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -429,6 +430,205 @@ static int gtp1u_udp_encap_recv(struct sock *sk, struct sk_buff *skb) return 1; } +static struct sk_buff **gtp_gro_receive_finish(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb, + void *hdr, size_t hdrlen) +{ + const struct packet_offload *ptype; + struct sk_buff **pp; + __be16 type; + + type = ipver_to_eth((struct iphdr *)((void *)hdr + hdrlen)); + if (!type) + goto out_err; + + rcu_read_lock(); + + ptype = gro_find_receive_by_type(type); + if (!ptype) + goto out_unlock_err; + + skb_gro_pull(skb, hdrlen); + skb_gro_postpull_rcsum(skb, hdr, hdrlen); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); + + rcu_read_unlock(); + + return pp; + +out_unlock_err: + rcu_read_unlock(); +out_err: + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; +} + +static struct sk_buff 
**gtp0_gro_receive(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) +{ + struct gtp0_header *gtp0; + size_t len, hdrlen, off; + struct sk_buff *p; + + off = skb_gro_offset(skb); + len = off + sizeof(*gtp0); + hdrlen = sizeof(*gtp0); + + gtp0 = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, len)) { + gtp0 = skb_gro_header_slow(skb, len, off); + if (unlikely(!gtp0)) + goto out; + } + + if ((gtp0->flags >> 5) != GTP_V0 || gtp0->type != GTP_TPDU) + goto out; + + hdrlen += sizeof(*gtp0); + + /* To get IP version */ + len += sizeof(struct iphdr); + + /* Now get header with GTP header an IPv4 header (for version) */ + if (skb_gro_header_hard(skb, len)) { + gtp0 = skb_gro_header_slow(skb, len, off); + if (unlikely(!gtp0)) + goto out; + } + + for (p = *head; p; p = p->next) { + const struct gtp0_header *gtp0_t; + + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + gtp0_t = (struct gtp0_header *)(p->data + off); + + if (gtp0->flags != gtp0_t->flags || + gtp0->type != gtp0_t->type || + gtp0->flow != gtp0_t->flow || + gtp0->tid != gtp0_t->tid) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + } + + return gtp_gro_receive_finish(sk, head, skb, gtp0, hdrlen); + +out: + NAPI_GRO_CB(skb)->flush |= 1; + + return NULL; +} + +static struct sk_buff **gtp1u_gro_receive(struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) +{ + struct gtp1_header *gtp1; + size_t len, hdrlen, off; + struct sk_buff *p; + + off = skb_gro_offset(skb); + len = off + sizeof(*gtp1); + hdrlen = sizeof(*gtp1); + + gtp1 = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, len)) { + gtp1 = skb_gro_header_slow(skb, len, off); + if (unlikely(!gtp1)) + goto out; + } + + if ((gtp1->flags >> 5) != GTP_V1 || gtp1->type != GTP_TPDU) + goto out; + + if (gtp1->flags & GTP1_F_MASK) { + hdrlen += 4; + len += 4; + } + + len += sizeof(struct iphdr); + + /* Now get header with GTP header an IPv4 header (for version) */ + if (skb_gro_header_hard(skb, len)) { + gtp1 = 
skb_gro_header_slow(skb, len, off); + if (unlikely(!gtp1)) + goto out; + } + + for (p = *head; p; p = p->next) { + const struct gtp1_header *gtp1_t; + + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + gtp1_t = (struct gtp1_header *)(p->data + off); + + if (gtp1->flags != gtp1_t->flags || + gtp1->type != gtp1_t->type || + gtp1->tid != gtp1_t->tid) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + } + + return gtp_gro_receive_finish(sk, head, skb, gtp1, hdrlen); + +out: + NAPI_GRO_CB(skb)->flush = 1; + + return NULL; +} + +static int gtp_gro_complete_finish(struct sock *sk, struct sk_buff *skb, + int nhoff, size_t hdrlen) +{ + struct packet_offload *ptype; + int err = -EINVAL; + __be16 type; + + type = ipver_to_eth((struct iphdr *)(skb->data + nhoff + hdrlen)); + if (!type) + return err; + + rcu_read_lock(); + ptype = gro_find_complete_by_type(type); + if (ptype) + err = ptype->callbacks.gro_complete(skb, nhoff + hdrlen); + + rcu_read_unlock(); + + skb_set_inner_mac_header(skb, nhoff + hdrlen); + + return err; +} + +static int gtp0_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) +{ + struct gtp0_header *gtp0 = (struct gtp0_header *)(skb->data + nhoff); + size_t hdrlen = sizeof(struct gtp0_header); + + gtp0->length = htons(skb->len - nhoff - hdrlen); + + return gtp_gro_complete_finish(sk, skb, nhoff, hdrlen); +} + +static int gtp1u_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) +{ + struct gtp1_header *gtp1 = (struct gtp1_header *)(skb->data + nhoff); + size_t hdrlen = sizeof(struct gtp1_header); + + if (gtp1->flags & GTP1_F_MASK) + hdrlen += 4; + + gtp1->length = htons(skb->len - nhoff - hdrlen); + + return gtp_gro_complete_finish(sk, skb, nhoff, hdrlen); +} + static void gtp_encap_destroy(struct sock *sk) { struct gtp_dev *gtp; @@ -946,9 +1146,13 @@ static int gtp_encap_enable_sock(struct socket *sock, int type, switch (type) { case UDP_ENCAP_GTP0: tuncfg.encap_rcv = gtp0_udp_encap_recv; + tuncfg.gro_receive = gtp0_gro_receive; + 
tuncfg.gro_complete = gtp0_gro_complete; break; case UDP_ENCAP_GTP1U: tuncfg.encap_rcv = gtp1u_udp_encap_recv; + tuncfg.gro_receive = gtp1u_gro_receive; + tuncfg.gro_complete = gtp1u_gro_complete; break; default: pr_debug("Unknown encap type %u\n", type); From patchwork Tue Sep 19 00:39:04 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tom Herbert X-Patchwork-Id: 815225 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dkim=pass (2048-bit key; unprotected) header.d=quantonium-net.20150623.gappssmtp.com header.i=@quantonium-net.20150623.gappssmtp.com header.b="BpoVEtDW"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 3xx3tk5QpYz9s5L for ; Tue, 19 Sep 2017 10:40:14 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751572AbdISAkM (ORCPT ); Mon, 18 Sep 2017 20:40:12 -0400 Received: from mail-pg0-f44.google.com ([74.125.83.44]:46875 "EHLO mail-pg0-f44.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1750925AbdISAkF (ORCPT ); Mon, 18 Sep 2017 20:40:05 -0400 Received: by mail-pg0-f44.google.com with SMTP id i130so1093747pgc.3 for ; Mon, 18 Sep 2017 17:40:05 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quantonium-net.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=yD6rTQ88tAwG7ClRyMr2nyWuITRx1lwY2PD/sJZPSI8=; b=BpoVEtDWlfRnhNT8YynL0O7MZZq50XyHOc9IwYQcLl+/EQNpfc6PbBsD2em16KHEy8 3FmZnEpw66lbjvLvIlXkKjAo4gqdSyjh9bG4ZfpRHmKIMCAo7/MrWVTJs5B0ZFA/OC1a 
hcKmQZ7FQCueYVAr+kB/VMbmnm2OahDQQeILaZc+YtlyW2n27wGwmiUre55KvKJMgop0 /khP2WnNAt8DJUJzN84QFBp0GsltLA9YY1HRiPlWmJWef373Esfk4/ky0ot13xHXhss8 iW0aw1Lc65RzL12a44eADH78Or/SJatqoGnY8GqNDldNb1InatGu1vVpe4JCR8wz+L7C Wajg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=yD6rTQ88tAwG7ClRyMr2nyWuITRx1lwY2PD/sJZPSI8=; b=Tv+Mme4SeFl8OZcUfHAS9UBdcj8QCvLUJmUA0Y13uyFvhpVt17rpSZsWfbaOLkUvNV nZhaLQEmkUGrX61Y7xwPjjdlWHUb2hXoa3nTiunqcp8GFjvB7VIyRKOCEc20y4kV/BGP t2BcMhnPaFkTJ5U1v45QqGJulfibmyn9Sw8+Y8HhBXaA3HAsl0h/hFqx55X7BHsI2HOX cbDtlX7CwoHoHkcsB4mGmKQGpA+HDhFt5lvAu0fzMXdgOlkUSggawYi5WPYdY7A0uekb pBN+80DxvIOBkezgZi6jMVxZVztmF6uLrQZaYfzLSmcNEtf9hp5MXkETezvdBrVWxbF8 rRkA== X-Gm-Message-State: AHPjjUglDjcZWtajAho8DF1P3DxoKGChPD2BHf+yRX+RZy0dA7jqkxhn iBcv4O8Cp8WUpXcd X-Google-Smtp-Source: AOwi7QC49qujAUBPt/9PK/H+wZKPCm1OO4x7oHlotAJZFo2ZCmcJnjWpcxWn4ApBbnn8UZ+l2MPyRA== X-Received: by 10.99.53.3 with SMTP id c3mr365571pga.220.1505781605058; Mon, 18 Sep 2017 17:40:05 -0700 (PDT) Received: from localhost.localdomain (c-73-162-13-107.hsd1.ca.comcast.net. 
[73.162.13.107]) by smtp.gmail.com with ESMTPSA id k78sm662018pfb.157.2017.09.18.17.40.03 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 18 Sep 2017 17:40:04 -0700 (PDT) From: Tom Herbert To: davem@davemloft.net Cc: netdev@vger.kernel.org, pablo@netfilter.org, laforge@gnumonks.org, rohit@quantonium.net, Tom Herbert Subject: [PATCH net-next 14/14] gtp: GSO support Date: Mon, 18 Sep 2017 17:39:04 -0700 Message-Id: <20170919003904.5124-15-tom@quantonium.net> X-Mailer: git-send-email 2.11.0 In-Reply-To: <20170919003904.5124-1-tom@quantonium.net> References: <20170919003904.5124-1-tom@quantonium.net> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Need to define a gtp_gso_segment since the GTP header includes a length field that must be set per packet. Also, GPv0 header includes a sequence number that is incremented per packet. Signed-off-by: Tom Herbert --- drivers/net/gtp.c | 176 +++++++++++++++++++++++++++++++++++++++---- include/uapi/linux/if_link.h | 1 - 2 files changed, 163 insertions(+), 14 deletions(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 2f9d810cf19f..a2c4d9804a8f 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -120,6 +120,8 @@ static u32 gtp_h_initval; static void pdp_context_delete(struct pdp_ctx *pctx); +static int gtp_gso_type; + static inline u32 gtp0_hashfn(u64 tid) { u32 *tid32 = (u32 *) &tid; @@ -430,6 +432,69 @@ static int gtp1u_udp_encap_recv(struct sock *sk, struct sk_buff *skb) return 1; } +static struct sk_buff *gtp_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + int tnl_hlen = skb->mac_len; + struct gtp0_header *gtp0; + + if (unlikely(!pskb_may_pull(skb, tnl_hlen))) + return ERR_PTR(-EINVAL); + + /* Make sure we have a mininal GTP header */ + if (unlikely(tnl_hlen < min_t(size_t, sizeof(struct gtp0_header), + sizeof(struct gtp1_header)))) + return ERR_PTR(-EINVAL); + + /* 
Determine version */ + gtp0 = (struct gtp0_header *)skb->data; + switch (gtp0->flags >> 5) { + case GTP_V0: { + u16 tx_seq; + + if (unlikely(tnl_hlen != sizeof(struct gtp0_header))) + return ERR_PTR(-EINVAL); + + tx_seq = ntohs(gtp0->seq); + + /* segment inner packet. */ + segs = skb_mac_gso_segment(skb, features); + if (!IS_ERR_OR_NULL(segs)) { + skb = segs; + do { + gtp0 = (struct gtp0_header *) + skb_mac_header(skb); + gtp0->length = ntohs(skb->len - tnl_hlen); + gtp0->seq = htons(tx_seq); + tx_seq++; + } while ((skb = skb->next)); + } + break; + } + case GTP_V1: { + struct gtp1_header *gtp1; + + if (unlikely(tnl_hlen != sizeof(struct gtp1_header))) + return ERR_PTR(-EINVAL); + + /* segment inner packet. */ + segs = skb_mac_gso_segment(skb, features); + if (!IS_ERR_OR_NULL(segs)) { + skb = segs; + do { + gtp1 = (struct gtp1_header *) + skb_mac_header(skb); + gtp1->length = ntohs(skb->len - tnl_hlen); + } while ((skb = skb->next)); + } + break; + } + } + + return segs; +} + static struct sk_buff **gtp_gro_receive_finish(struct sock *sk, struct sk_buff **head, struct sk_buff *skb, @@ -688,18 +753,25 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; struct gtp0_header *gtp0; + u32 tx_seq; gtp0 = skb_push(skb, sizeof(*gtp0)); gtp0->flags = 0x1e; /* v0, GTP-non-prime. */ gtp0->type = GTP_TPDU; gtp0->length = htons(payload_len); - gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % - 0xffff); gtp0->flow = htons(pctx->u.v0.flow); gtp0->number = 0xff; gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; gtp0->tid = cpu_to_be64(pctx->u.v0.tid); + + /* If skb is GSO allocate sequence numbers for all the segments */ + tx_seq = skb_shinfo(skb)->gso_segs ? 
+ atomic_add_return(skb_shinfo(skb)->gso_segs, + &pctx->tx_seq) : + atomic_inc_return(&pctx->tx_seq); + + /* Header carries the first seq of the allocated range; for GSO the + * segment callback increments from here, one per segment. + */ + gtp0->seq = htons((u16)(tx_seq - (skb_shinfo(skb)->gso_segs ?: 1))); } static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) @@ -737,6 +809,59 @@ static void gtp_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) } } +static size_t gtp_max_header_len(int version) + +{ + switch (version) { + case GTP_V0: + return sizeof(struct gtp0_header); + case GTP_V1: + return sizeof(struct gtp1_header) + 4; + } + + /* Should not happen */ + return 0; +} + +static int gtp_build_skb(struct sk_buff *skb, struct dst_entry *dst, + struct pdp_ctx *pctx, bool xnet, int ip_hdr_len, + bool udp_sum) +{ + int type = (udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL) | + gtp_gso_type; + int min_headroom; + u16 protocol; + int err; + + skb_scrub_packet(skb, xnet); + + min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + + gtp_max_header_len(pctx->gtp_version) + ip_hdr_len; + + err = skb_cow_head(skb, min_headroom); + if (unlikely(err)) + goto free_dst; + + err = iptunnel_handle_offloads(skb, type); + if (err) + goto free_dst; + + protocol = ipver_to_eth(ip_hdr(skb)); + + gtp_push_header(skb, pctx); + + /* GTP header is treated as inner MAC header */ + skb_reset_inner_mac_header(skb); + + skb_set_inner_protocol(skb, protocol); + + return 0; + +free_dst: + dst_release(dst); + return err; +} + static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, struct pdp_ctx *pctx) { @@ -746,13 +871,6 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, bool udp_csum; int err = 0; - /* Ensure there is sufficient headroom.
*/ - err = skb_cow_head(skb, dev->needed_headroom); - if (unlikely(err)) - goto out_err; - - skb_reset_inner_headers(skb); - if (pctx->peer_af == AF_INET) { __be32 saddr = inet_sk(sk)->inet_saddr; struct rtable *rt; @@ -768,9 +886,13 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, goto out_err; } - skb_dst_drop(skb); + err = gtp_build_skb(skb, &rt->dst, pctx, xnet, + sizeof(struct iphdr), + !(pctx->cfg_flags & + GTP_F_UDP_ZERO_CSUM_TX)); + if (err) + goto out_err; - gtp_push_header(skb, pctx); udp_csum = !(pctx->cfg_flags & GTP_F_UDP_ZERO_CSUM_TX); udp_tunnel_xmit_skb(rt, sk, skb, saddr, pctx->peer_addr_ip4.s_addr, @@ -797,9 +919,13 @@ static int gtp_xmit(struct sk_buff *skb, struct net_device *dev, goto out_err; } - skb_dst_drop(skb); + err = gtp_build_skb(skb, dst, pctx, xnet, + sizeof(struct ipv6hdr), + !(pctx->cfg_flags & + GTP_F_UDP_ZERO_CSUM6_TX)); + if (err) + goto out_err; - gtp_push_header(skb, pctx); udp_csum = !(pctx->cfg_flags & GTP_F_UDP_ZERO_CSUM6_TX); udp_tunnel6_xmit_skb(dst, sk, skb, dev, &saddr, &pctx->peer_addr_ip6, @@ -898,6 +1024,12 @@ static const struct net_device_ops gtp_netdev_ops = { .ndo_get_stats64 = ip_tunnel_get_stats64, }; +#define GTP_FEATURES (NETIF_F_SG | \ + NETIF_F_FRAGLIST | \ + NETIF_F_HIGHDMA | \ + NETIF_F_GSO_SOFTWARE | \ + NETIF_F_HW_CSUM) + static void gtp_link_setup(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); @@ -912,7 +1044,13 @@ static void gtp_link_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->priv_flags |= IFF_NO_QUEUE; + dev->features |= NETIF_F_LLTX; + dev->features |= GTP_FEATURES; + + dev->hw_features |= GTP_FEATURES; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + netif_keep_dst(dev); /* Assume largest header, ie. GTPv0. 
*/ @@ -1903,6 +2041,11 @@ static struct pernet_operations gtp_net_ops = { .size = sizeof(struct gtp_net), }; +static const struct skb_gso_app gtp_gso_app = { + .check_flags = SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM, + .gso_segment = gtp_gso_segment, +}; + static int __init gtp_init(void) { int err; @@ -1921,6 +2064,10 @@ static int __init gtp_init(void) if (err < 0) goto unreg_genl_family; + gtp_gso_type = skb_gso_app_register(>p_gso_app); + if (!gtp_gso_type) + pr_warn("GTP unable to create UDP app gso type"); + pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; @@ -1937,6 +2084,9 @@ late_initcall(gtp_init); static void __exit gtp_fini(void) { + if (gtp_gso_type) + skb_gso_app_unregister(gtp_gso_type, >p_gso_app); + unregister_pernet_subsys(>p_net_ops); genl_unregister_family(>p_genl_family); rtnl_link_unregister(>p_link_ops); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 14a32d745e24..7c15db44eab3 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -558,7 +558,6 @@ enum { IFLA_GTP_UDP_CSUM, IFLA_GTP_UDP_ZERO_CSUM6_TX, IFLA_GTP_UDP_ZERO_CSUM6_RX, - __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)