From patchwork Wed Dec 9 17:23:50 2015
X-Patchwork-Submitter: Gregory CLEMENT
X-Patchwork-Id: 554726
X-Patchwork-Delegate: davem@davemloft.net
From: Gregory CLEMENT
To: "David S. Miller", linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
    Thomas Petazzoni
Cc: Jason Cooper, Andrew Lunn, Sebastian Hesselbarth, Gregory CLEMENT,
    Ezequiel Garcia, linux-arm-kernel@lists.infradead.org, Lior Amsalem,
    Nadav Haklai, Marcin Wojtas, Simon Guinot, Maxime Ripard,
    Boris BREZILLON, Russell King - ARM Linux, Willy Tarreau, Arnd Bergmann
Subject: [PATCH net-next v3 3/4] net: mvneta: Add naive RSS support
Date: Wed, 9 Dec 2015 18:23:50 +0100
Message-Id: <1449681831-2798-4-git-send-email-gregory.clement@free-electrons.com>
X-Mailer: git-send-email 2.5.0
In-Reply-To: <1449681831-2798-1-git-send-email-gregory.clement@free-electrons.com>
References: <1449681831-2798-1-git-send-email-gregory.clement@free-electrons.com>
X-Mailing-List: netdev@vger.kernel.org

This patch adds support for the RSS-related ethtool functions. For now
only one entry of the indirection table is used, which allows
associating an mvneta interface with a given CPU.
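
With a single-entry table, the queue (and therefore the CPU) serving
the whole interface can be selected from userspace with the standard
ethtool indirection-table commands. For example (the interface name and
queue number below are purely illustrative):

  # show the RX flow hash indirection table (a single entry here)
  ethtool -x eth0

  # point the single entry at rxq 2, i.e. move RX processing of the
  # interface to the CPU that owns rxq 2
  ethtool -X eth0 weight 0 0 1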
Signed-off-by: Gregory CLEMENT
Tested-by: Marcin Wojtas
---
 drivers/net/ethernet/marvell/mvneta.c | 127 +++++++++++++++++++++++++++++++++-
 1 file changed, 126 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8974ab084839..e0dba6869605 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -259,6 +259,11 @@
 
 #define MVNETA_TX_MTU_MAX		0x3ffff
 
+/* The RSS lookup table actually has 256 entries but we do not use
+ * them yet
+ */
+#define MVNETA_RSS_LU_TABLE_SIZE	1
+
 /* TSO header size */
 #define TSO_HEADER_SIZE 128
 
@@ -380,6 +385,8 @@ struct mvneta_port {
 	int use_inband_status:1;
 
 	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+
+	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -1027,7 +1034,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 		if ((rxq % max_cpu) == cpu)
 			rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-		if (cpu == rxq_def)
+		if (cpu == pp->rxq_def)
 			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
 
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
@@ -2483,6 +2490,18 @@ static void mvneta_percpu_unmask_interrupt(void *arg)
 		    MVNETA_MISCINTR_INTR_MASK);
 }
 
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queues are masked, but actually only the ones
+	 * mapped to this CPU will be masked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	unsigned int cpu;
@@ -3173,6 +3192,106 @@ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
 	return -EOPNOTSUPP;
 }
 
+static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+	return MVNETA_RSS_LU_TABLE_SIZE;
+}
+
+static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
+				    struct ethtool_rxnfc *info,
+				    u32 *rules __always_unused)
+{
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = rxq_number;
+		return 0;
+	case ETHTOOL_GRXFH:
+		return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int mvneta_config_rss(struct mvneta_port *pp)
+{
+	int cpu;
+	u32 val;
+
+	netif_tx_stop_all_queues(pp->dev);
+
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
+					 pp, true);
+
+	/* We have to synchronise on the napi of each CPU */
+	for_each_online_cpu(cpu) {
+		struct mvneta_pcpu_port *pcpu_port =
+			per_cpu_ptr(pp->ports, cpu);
+
+		napi_synchronize(&pcpu_port->napi);
+		napi_disable(&pcpu_port->napi);
+	}
+
+	pp->rxq_def = pp->indir[0];
+
+	/* Update unicast mapping */
+	mvneta_set_rx_mode(pp->dev);
+
+	/* Update the portCfg register according to the new default RXQ */
+	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
+	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+	/* Update the elected CPU matching the new rxq_def */
+	mvneta_percpu_elect(pp);
+
+	/* We have to synchronise on the napi of each CPU */
+	for_each_online_cpu(cpu) {
+		struct mvneta_pcpu_port *pcpu_port =
+			per_cpu_ptr(pp->ports, cpu);
+
+		napi_enable(&pcpu_port->napi);
+	}
+
+	netif_tx_start_all_queues(pp->dev);
+
+	return 0;
+}
+
+static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+				   const u8 *key, const u8 hfunc)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	/* We require at least one supported parameter to be changed
+	 * and no change in any of the unsupported parameters
+	 */
+	if (key ||
+	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+
+	if (!indir)
+		return 0;
+
+	memcpy(pp->indir, indir, sizeof(pp->indir));
+
+	return mvneta_config_rss(pp);
+}
+
+static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+				   u8 *hfunc)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (!indir)
+		return 0;
+
+	memcpy(indir, pp->indir, sizeof(pp->indir));
+
+	return 0;
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_open            = mvneta_open,
 	.ndo_stop            = mvneta_stop,
@@ -3197,6 +3316,10 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 	.get_strings	= mvneta_ethtool_get_strings,
 	.get_ethtool_stats	= mvneta_ethtool_get_stats,
 	.get_sset_count	= mvneta_ethtool_get_sset_count,
+	.get_rxfh_indir_size	= mvneta_ethtool_get_rxfh_indir_size,
+	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
+	.get_rxfh	= mvneta_ethtool_get_rxfh,
+	.set_rxfh	= mvneta_ethtool_set_rxfh,
 };
 
 /* Initialize hw */
@@ -3389,6 +3512,8 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	pp->rxq_def = rxq_def;
 
+	pp->indir[0] = rxq_def;
+
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
 		err = PTR_ERR(pp->clk);
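
Not part of the patch, but to show the userspace side of the new hooks:
a minimal sketch that reads the indirection table through the classic
SIOCETHTOOL ioctl, the path ultimately served by the driver's
get_rxfh_indir_size() and get_rxfh() callbacks above. The interface
name is an assumption and error handling is kept to the bare minimum:

/* rss_indir_dump.c - dump a NIC's RSS indirection table via
 * ETHTOOL_GRXFHINDIR. "eth0" is only an example interface name.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxfh_indir query = {
		.cmd = ETHTOOL_GRXFHINDIR,
		.size = 0,	/* size 0 = only query the table size */
	};
	struct ethtool_rxfh_indir *indir;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* First call: the kernel reports the table size advertised by
	 * the driver's get_rxfh_indir_size() hook (1 for mvneta here).
	 */
	ifr.ifr_data = (void *)&query;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	indir = calloc(1, sizeof(*indir) + query.size * sizeof(__u32));
	if (!indir)
		return 1;
	indir->cmd = ETHTOOL_GRXFHINDIR;
	indir->size = query.size;

	/* Second call: ring_index[] is filled from the driver's get_rxfh() */
	ifr.ifr_data = (void *)indir;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < indir->size; i++)
		printf("indir[%u] -> rxq %u\n", i, indir->ring_index[i]);

	free(indir);
	close(fd);
	return 0;
}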