From patchwork Tue May 12 07:39:15 2015
X-Patchwork-Submitter: Jiri Pirko
X-Patchwork-Id: 471174
X-Patchwork-Delegate: davem@davemloft.net
From: Jiri Pirko <jiri@resnulli.us>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, jhs@mojatatu.com, tgraf@suug.ch, jesse@nicira.com,
    kaber@trash.net, tom@herbertland.com, edumazet@google.com,
    alexander.h.duyck@redhat.com, hannes@stressinduktion.org,
    ast@plumgrid.com, daniel@iogearbox.net, herbert@gondor.apana.org.au,
    cwang@twopensource.com, john.fastabend@gmail.com
Subject: [patch net-next v2 07/15] net: move netdev_pick_tx and dependencies to net/core/dev.c
Date: Tue, 12 May 2015 09:39:15 +0200
Message-Id: <1431416363-5675-8-git-send-email-jiri@resnulli.us>
In-Reply-To: <1431416363-5675-1-git-send-email-jiri@resnulli.us>
References: <1431416363-5675-1-git-send-email-jiri@resnulli.us>
X-Mailer: git-send-email 2.4.0
X-Mailing-List: netdev@vger.kernel.org

next to its user.
No relation to flow_dissector, so it makes no sense to have it in
flow_dissector.c.

Signed-off-by: Jiri Pirko <jiri@resnulli.us>
---
 net/core/dev.c            | 78 +++++++++++++++++++++++++++++++++++++++++++++++
 net/core/flow_dissector.c | 78 ----------------------------------------------
 2 files changed, 78 insertions(+), 78 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 3e7838f..8ef30be 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2936,6 +2936,84 @@ int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_loopback_xmit);
 
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
+		map = rcu_dereference(
+		    dev_maps->cpu_map[skb->sender_cpu - 1]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else
+				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
+									   map->len)];
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	return -1;
+#endif
+}
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
+
+	if (queue_index < 0 || skb->ooo_okay ||
+	    queue_index >= dev->real_num_tx_queues) {
+		int new_index = get_xps_queue(dev, skb);
+		if (new_index < 0)
+			new_index = skb_tx_hash(dev, skb);
+
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, new_index);
+
+		queue_index = new_index;
+	}
+
+	return queue_index;
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+				    struct sk_buff *skb,
+				    void *accel_priv)
+{
+	int queue_index = 0;
+
+#ifdef CONFIG_XPS
+	if (skb->sender_cpu == 0)
+		skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
+	if (dev->real_num_tx_queues != 1) {
+		const struct net_device_ops *ops = dev->netdev_ops;
+		if (ops->ndo_select_queue)
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
+		else
+			queue_index = __netdev_pick_tx(dev, skb);
+
+		if (!accel_priv)
+			queue_index = netdev_cap_txqueue(dev, queue_index);
+	}
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
+}
+
 /**
  *	__dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 07ca11d..04f8723 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -431,81 +431,3 @@ u32 skb_get_poff(const struct sk_buff *skb)
 
 	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
 }
-
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
-{
-#ifdef CONFIG_XPS
-	struct xps_dev_maps *dev_maps;
-	struct xps_map *map;
-	int queue_index = -1;
-
-	rcu_read_lock();
-	dev_maps = rcu_dereference(dev->xps_maps);
-	if (dev_maps) {
-		map = rcu_dereference(
-		    dev_maps->cpu_map[skb->sender_cpu - 1]);
-		if (map) {
-			if (map->len == 1)
-				queue_index = map->queues[0];
-			else
-				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
-									   map->len)];
-			if (unlikely(queue_index >= dev->real_num_tx_queues))
-				queue_index = -1;
-		}
-	}
-	rcu_read_unlock();
-
-	return queue_index;
-#else
-	return -1;
-#endif
-}
-
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-	int queue_index = sk_tx_queue_get(sk);
-
-	if (queue_index < 0 || skb->ooo_okay ||
-	    queue_index >= dev->real_num_tx_queues) {
-		int new_index = get_xps_queue(dev, skb);
-		if (new_index < 0)
-			new_index = skb_tx_hash(dev, skb);
-
-		if (queue_index != new_index && sk &&
-		    rcu_access_pointer(sk->sk_dst_cache))
-			sk_tx_queue_set(sk, new_index);
-
-		queue_index = new_index;
-	}
-
-	return queue_index;
-}
-
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb,
-				    void *accel_priv)
-{
-	int queue_index = 0;
-
-#ifdef CONFIG_XPS
-	if (skb->sender_cpu == 0)
-		skb->sender_cpu = raw_smp_processor_id() + 1;
-#endif
-
-	if (dev->real_num_tx_queues != 1) {
-		const struct net_device_ops *ops = dev->netdev_ops;
-		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
-							    __netdev_pick_tx);
-		else
-			queue_index = __netdev_pick_tx(dev, skb);
-
-		if (!accel_priv)
-			queue_index = netdev_cap_txqueue(dev, queue_index);
-	}
-
-	skb_set_queue_mapping(skb, queue_index);
-	return netdev_get_tx_queue(dev, queue_index);
-}
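
As context for reviewers: the fallback argument that netdev_pick_tx() passes
to ndo_select_queue() lets a driver steer only the traffic it cares about and
defer everything else to the core's selection logic. A minimal sketch of such
a callback follows; foo_select_queue and foo_netdev_ops are hypothetical
names, not part of this patch, and the signature matches the current
net_device_ops / select_queue_fallback_t definitions:

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>

/* Hypothetical driver callback: pin control-priority traffic to queue 0
 * and hand all other packets to the core's pick (__netdev_pick_tx, passed
 * in by netdev_pick_tx() as the fallback pointer).
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv,
			    select_queue_fallback_t fallback)
{
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	return fallback(dev, skb);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_select_queue	= foo_select_queue,
	/* other callbacks elided */
};

Note that whatever index the callback returns is still clamped by
netdev_cap_txqueue() when no accel_priv is passed, as the moved
netdev_pick_tx() above shows.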