From patchwork Mon Jul 25 21:38:16 2016
X-Patchwork-Id: 652407
From: Mahesh Bandewar <mahesh@bandewar.net>
To: David Miller <davem@davemloft.net>
Cc: Mahesh Bandewar <mahesh@bandewar.net>, Eric Dumazet, netdev <netdev@vger.kernel.org>
Subject: [PATCH next] ipvlan: Scrub skb before crossing the namespace boundary
Date: Mon, 25 Jul 2016 14:38:16 -0700
Message-Id: <1469482696-32271-1-git-send-email-mahesh@bandewar.net>

The earlier patch c3aaa06d5a63 ("ipvlan: scrub skb before routing in L3 mode.") scrubbed the skb, but only on the TX path and only in L3 mode. This patch extends the scrubbing to both modes (L2 and L3) and to both the TX and RX paths.
Signed-off-by: Mahesh Bandewar <mahesh@bandewar.net>
---
 drivers/net/ipvlan/ipvlan_core.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d6d0524ee5fd..b5f9511d819e 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,6 +254,18 @@ acct:
 	}
 }
 
+static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
+{
+	bool xnet = true;
+
+	if (dev)
+		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));
+
+	skb_scrub_packet(skb, xnet);
+	if (dev)
+		skb->dev = dev;
+}
+
 static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
 			    bool local)
 {
@@ -280,7 +292,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
 		*pskb = skb;
 	}
 
-	skb->dev = dev;
+	ipvlan_skb_crossing_ns(skb, dev);
 
 	if (local) {
 		skb->pkt_type = PACKET_HOST;
@@ -347,7 +359,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
 	return addr;
 }
 
-static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet)
+static int ipvlan_process_v4_outbound(struct sk_buff *skb)
 {
 	const struct iphdr *ip4h = ip_hdr(skb);
 	struct net_device *dev = skb->dev;
@@ -370,7 +382,6 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet)
 		ip_rt_put(rt);
 		goto err;
 	}
-	skb_scrub_packet(skb, xnet);
 	skb_dst_set(skb, &rt->dst);
 	err = ip_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
@@ -385,7 +396,7 @@ out:
 	return ret;
 }
 
-static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet)
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct net_device *dev = skb->dev;
@@ -408,7 +419,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet)
 		dst_release(dst);
 		goto err;
 	}
-	skb_scrub_packet(skb, xnet);
 	skb_dst_set(skb, dst);
 	err = ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
@@ -423,7 +433,7 @@ out:
 	return ret;
 }
 
-static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet)
+static int ipvlan_process_outbound(struct sk_buff *skb)
 {
 	struct ethhdr *ethh = eth_hdr(skb);
 	int ret = NET_XMIT_DROP;
@@ -447,9 +457,9 @@ static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet)
 	}
 
 	if (skb->protocol == htons(ETH_P_IPV6))
-		ret = ipvlan_process_v6_outbound(skb, xnet);
+		ret = ipvlan_process_v6_outbound(skb);
 	else if (skb->protocol == htons(ETH_P_IP))
-		ret = ipvlan_process_v4_outbound(skb, xnet);
+		ret = ipvlan_process_v4_outbound(skb);
 	else {
 		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
 				    ntohs(skb->protocol));
@@ -485,7 +495,6 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
 	void *lyr3h;
 	struct ipvl_addr *addr;
 	int addr_type;
-	bool xnet;
 
 	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
 	if (!lyr3h)
@@ -496,9 +505,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
 		return ipvlan_rcv_frame(addr, &skb, true);
 
 out:
-	xnet = !net_eq(dev_net(skb->dev), dev_net(ipvlan->phy_dev));
-	skb->dev = ipvlan->phy_dev;
-	return ipvlan_process_outbound(skb, xnet);
+	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+	return ipvlan_process_outbound(skb);
 }
 
 static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
@@ -528,11 +536,12 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 		return dev_forward_skb(ipvlan->phy_dev, skb);
 
 	} else if (is_multicast_ether_addr(eth->h_dest)) {
+		ipvlan_skb_crossing_ns(skb, NULL);
 		ipvlan_multicast_enqueue(ipvlan->port, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
-	skb->dev = ipvlan->phy_dev;
+	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
 	return dev_queue_xmit(skb);
 }
 
@@ -622,8 +631,10 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 			 * when work-queue processes this frame. This is
 			 * achieved by returning RX_HANDLER_PASS.
 			 */
-			if (nskb)
+			if (nskb) {
+				ipvlan_skb_crossing_ns(nskb, NULL);
 				ipvlan_multicast_enqueue(port, nskb);
+			}
 		}
 	} else {
 		struct ipvl_addr *addr;
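
For reviewers following along: the heart of the change is the new ipvlan_skb_crossing_ns() helper. Below is a commented restatement of it, assuming the usual ipvlan_core.c includes; the code matches the first hunk above, but the comments are one reading of why each step is there (based on skb_scrub_packet()'s behavior in this kernel era) and are not part of the patch itself.

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	/* No target device known yet (the multicast enqueue paths pass
	 * NULL): the frame may later be delivered into a different
	 * namespace, so assume the worst and scrub unconditionally.
	 */
	bool xnet = true;

	if (dev)
		/* Treat it as cross-netns only if skb->dev and the target
		 * device actually live in different namespaces.
		 */
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	/* skb_scrub_packet() always drops the dst and resets conntrack
	 * state; when xnet is true it additionally orphans the skb and
	 * clears skb->mark, so namespace-local state cannot leak across.
	 */
	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

The xnet flag is the important bit: a same-namespace scrub is cheap, while a cross-namespace scrub also detaches the owning socket and the mark, which is the state the L2 mode and RX paths were previously letting through.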