Patch Detail
GET: Show a patch.
PATCH: Partially update a patch.
PUT: Update a patch.
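These operations can be exercised with any HTTP client. Below is a minimal sketch, assuming the Python requests library and a hypothetical PATCHWORK_TOKEN environment variable; GET needs no authentication on this public instance, while PATCH/PUT require an API token (Patchwork instances typically accept it as "Authorization: Token <token>"). The state value used in the PATCH call is only an illustration, not part of this record.

import os
import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 887917

# GET: show a patch (read-only, no authentication needed)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partially update a patch (requires a token with maintainer rights)
token = os.environ.get("PATCHWORK_TOKEN")  # hypothetical variable name
if token:
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {token}"},
        json={"state": "accepted"},  # illustrative field/value only
    )
    resp.raise_for_status()

An example GET request and the raw JSON response it returns follow.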
GET /api/patches/887917/?format=api
{ "id": 887917, "url": "http://patchwork.ozlabs.org/api/patches/887917/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180319215644.31978-12-jeffrey.t.kirsher@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20180319215644.31978-12-jeffrey.t.kirsher@intel.com>", "list_archive_url": null, "date": "2018-03-19T21:56:41", "name": "[v3,12/15] ice: Add stats and ethtool support", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "48f3f9af846d3414ba3d6ef71791b32cb83b42dd", "submitter": { "id": 473, "url": "http://patchwork.ozlabs.org/api/people/473/?format=api", "name": "Kirsher, Jeffrey T", "email": "jeffrey.t.kirsher@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180319215644.31978-12-jeffrey.t.kirsher@intel.com/mbox/", "series": [ { "id": 34702, "url": "http://patchwork.ozlabs.org/api/series/34702/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=34702", "date": "2018-03-19T21:56:30", "name": "[v3,01/15] ice: Add basic driver framework for Intel(R) E800 Series", "version": 3, "mbox": "http://patchwork.ozlabs.org/series/34702/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/887917/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/887917/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=none (p=none dis=none) header.from=intel.com" ], "Received": [ "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 404qf01N1Hz9sVl\n\tfor <incoming@patchwork.ozlabs.org>;\n\tTue, 20 Mar 2018 08:56:40 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id ABC1782462;\n\tMon, 19 Mar 2018 21:56:38 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id nTvEeTVhZdI1; Mon, 19 Mar 2018 21:56:29 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 5FB8288F51;\n\tMon, 19 Mar 2018 21:56:29 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 0BC341C2272\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:28 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby 
hemlock.osuosl.org (Postfix) with ESMTP id 086118906F\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:28 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id grkcs0wh64j8 for <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:13 +0000 (UTC)", "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id A572E890E2\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:09 +0000 (UTC)", "from orsmga008.jf.intel.com ([10.7.209.65])\n\tby orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t19 Mar 2018 14:56:07 -0700", "from jtkirshe-nuc.jf.intel.com ([134.134.177.59])\n\tby orsmga008.jf.intel.com with ESMTP; 19 Mar 2018 14:56:07 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.48,332,1517904000\"; d=\"scan'208\";a=\"26667089\"", "From": "Jeff Kirsher <jeffrey.t.kirsher@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Mon, 19 Mar 2018 14:56:41 -0700", "Message-Id": "<20180319215644.31978-12-jeffrey.t.kirsher@intel.com>", "X-Mailer": "git-send-email 2.14.3", "In-Reply-To": "<20180319215644.31978-1-jeffrey.t.kirsher@intel.com>", "References": "<20180319215644.31978-1-jeffrey.t.kirsher@intel.com>", "Subject": "[Intel-wired-lan] [PATCH v3 12/15] ice: Add stats and ethtool\n\tsupport", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.24", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "MIME-Version": "1.0", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n\nThis patch implements a watchdog task to get packet statistics from\nthe device.\n\nThis patch also adds support for the following ethtool operations:\n\nethtool devname\nethtool -s devname [msglvl N] [msglevel type on|off]\nethtool -g|--show-ring devname\nethtool -G|--set-ring devname [rx N] [tx N]\nethtool -i|--driver devname\nethtool -d|--register-dump devname [raw on|off] [hex on|off] [file name]\nethtool -k|--show-features|--show-offload devname\nethtool -K|--features|--offload devname feature on|off\nethtool -P|--show-permaddr devname\nethtool -S|--statistics devname\nethtool -a|--show-pause devname\nethtool -A|--pause devname [autoneg on|off] [rx on|off] [tx on|off]\nethtool -r|--negotiate devname\n\nCC: Andrew Lunn <andrew@lunn.ch>\nCC: Jakub Kicinski <kubakici@wp.pl>\nCC: Stephen Hemminger <stephen@networkplumber.org>\nSigned-off-by: Anirudh Venkataramanan 
<anirudh.venkataramanan@intel.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n drivers/net/ethernet/intel/ice/Makefile | 3 +-\n drivers/net/ethernet/intel/ice/ice.h | 28 +-\n drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 43 ++\n drivers/net/ethernet/intel/ice/ice_common.c | 195 +++++\n drivers/net/ethernet/intel/ice/ice_common.h | 5 +\n drivers/net/ethernet/intel/ice/ice_ethtool.c | 954 ++++++++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 80 ++\n drivers/net/ethernet/intel/ice/ice_main.c | 469 +++++++++++-\n drivers/net/ethernet/intel/ice/ice_type.h | 70 ++\n 9 files changed, 1842 insertions(+), 5 deletions(-)\n create mode 100644 drivers/net/ethernet/intel/ice/ice_ethtool.c", "diff": "diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile\nindex 0abeb20c006d..643d63016624 100644\n--- a/drivers/net/ethernet/intel/ice/Makefile\n+++ b/drivers/net/ethernet/intel/ice/Makefile\n@@ -30,4 +30,5 @@ ice-y := ice_main.o\t\\\n \t ice_nvm.o\t\\\n \t ice_switch.o\t\\\n \t ice_sched.o\t\\\n-\t ice_txrx.o\n+\t ice_txrx.o\t\\\n+\t ice_ethtool.o\ndiff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex f10ae53cc4ac..6014ef9c36e1 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -27,12 +27,14 @@\n #include <linux/etherdevice.h>\n #include <linux/skbuff.h>\n #include <linux/cpumask.h>\n+#include <linux/rtnetlink.h>\n #include <linux/if_vlan.h>\n #include <linux/dma-mapping.h>\n #include <linux/pci.h>\n #include <linux/workqueue.h>\n #include <linux/aer.h>\n #include <linux/interrupt.h>\n+#include <linux/ethtool.h>\n #include <linux/timer.h>\n #include <linux/delay.h>\n #include <linux/bitmap.h>\n@@ -48,10 +50,14 @@\n #include \"ice_common.h\"\n #include \"ice_sched.h\"\n \n+extern const char ice_drv_ver[];\n #define ICE_BAR0\t\t0\n #define ICE_DFLT_NUM_DESC\t128\n+#define ICE_MIN_NUM_DESC\t8\n+#define ICE_MAX_NUM_DESC\t8160\n #define ICE_REQ_DESC_MULTIPLE\t32\n #define ICE_INT_NAME_STR_LEN\t(IFNAMSIZ + 16)\n+#define ICE_ETHTOOL_FWVER_LEN\t32\n #define ICE_AQ_LEN\t\t64\n #define ICE_MIN_MSIX\t\t2\n #define ICE_NO_VSI\t\t0xffff\n@@ -70,6 +76,8 @@\n #define ICE_RES_MISC_VEC_ID\t(ICE_RES_VALID_BIT - 1)\n #define ICE_INVAL_Q_INDEX\t0xffff\n \n+#define ICE_VSIQF_HKEY_ARRAY_SIZE\t((VSIQF_HKEY_MAX_INDEX + 1) *\t4)\n+\n #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)\n \n #define ICE_MAX_MTU\t(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \\\n@@ -116,6 +124,7 @@ enum ice_state {\n \t__ICE_DOWN,\n \t__ICE_PFR_REQ,\t\t\t/* set by driver and peers */\n \t__ICE_ADMINQ_EVENT_PENDING,\n+\t__ICE_CFG_BUSY,\n \t__ICE_SERVICE_SCHED,\n \t__ICE_STATE_NBITS\t\t/* must be last */\n };\n@@ -132,8 +141,13 @@ struct ice_vsi {\n \n \tirqreturn_t (*irq_handler)(int irq, void *data);\n \n+\tu64 tx_linearize;\n \tDECLARE_BITMAP(state, __ICE_STATE_NBITS);\n \tunsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];\n+\tu32 tx_restart;\n+\tu32 tx_busy;\n+\tu32 rx_buf_failed;\n+\tu32 rx_page_failed;\n \tint num_q_vectors;\n \tint base_vector;\n \tenum ice_vsi_type type;\n@@ -155,8 +169,14 @@ struct ice_vsi {\n \n \tstruct ice_aqc_vsi_props info;\t /* VSI properties */\n \n+\t/* VSI stats */\n+\tstruct rtnl_link_stats64 net_stats;\n+\tstruct ice_eth_stats eth_stats;\n+\tstruct ice_eth_stats eth_stats_prev;\n+\n \tbool irqs_ready;\n \tbool current_isup;\t\t /* Sync 'link up' logging */\n+\tbool stat_offsets_loaded;\n \n \t/* queue information */\n \tu8 
tx_mapping_mode;\t\t /* ICE_MAP_MODE_[CONTIG|SCATTER] */\n@@ -219,8 +239,10 @@ struct ice_pf {\n \tu16 q_left_rx;\t\t/* remaining num rx queues left unclaimed */\n \tu16 next_vsi;\t\t/* Next free slot in pf->vsi[] - 0-based! */\n \tu16 num_alloc_vsi;\n-\n+\tstruct ice_hw_port_stats stats;\n+\tstruct ice_hw_port_stats stats_prev;\n \tstruct ice_hw hw;\n+\tbool stat_prev_loaded;\t/* has previous stats been loaded */\n \tchar int_name[ICE_INT_NAME_STR_LEN];\n };\n \n@@ -253,8 +275,12 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,\n \twr32(hw, GLINT_DYN_CTL(vector), val);\n }\n \n+void ice_set_ethtool_ops(struct net_device *netdev);\n+int ice_up(struct ice_vsi *vsi);\n+int ice_down(struct ice_vsi *vsi);\n int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);\n int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);\n void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);\n+void ice_print_link_msg(struct ice_vsi *vsi, bool isup);\n \n #endif /* _ICE_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex 49102817f0a9..2c8d8533f87d 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -873,6 +873,45 @@ struct ice_aqc_get_phy_caps_data {\n \t} qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];\n };\n \n+/* Set PHY capabilities (direct 0x0601)\n+ * NOTE: This command must be followed by setup link and restart auto-neg\n+ */\n+struct ice_aqc_set_phy_cfg {\n+\tu8 lport_num;\n+\tu8 reserved[7];\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* Set PHY config command data structure */\n+struct ice_aqc_set_phy_cfg_data {\n+\t__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */\n+\t__le64 rsvd0;\n+\tu8 caps;\n+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY\t\tBIT(0)\n+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY\t\tBIT(1)\n+#define ICE_AQ_PHY_ENA_LOW_POWER\t\tBIT(2)\n+#define ICE_AQ_PHY_ENA_LINK\t\t\tBIT(3)\n+#define ICE_AQ_PHY_ENA_ATOMIC_LINK\t\tBIT(5)\n+\tu8 low_power_ctrl;\n+\t__le16 eee_cap; /* Value from ice_aqc_get_phy_caps */\n+\t__le16 eeer_value;\n+\tu8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */\n+\tu8 rsvd1;\n+};\n+\n+/* Restart AN command data structure (direct 0x0605)\n+ * Also used for response, with only the lport_num field present.\n+ */\n+struct ice_aqc_restart_an {\n+\tu8 lport_num;\n+\tu8 reserved;\n+\tu8 cmd_flags;\n+#define ICE_AQC_RESTART_AN_LINK_RESTART\tBIT(1)\n+#define ICE_AQC_RESTART_AN_LINK_ENABLE\tBIT(2)\n+\tu8 reserved2[13];\n+};\n+\n /* Get link status (indirect 0x0607), also used for Link Status Event */\n struct ice_aqc_get_link_status {\n \tu8 lport_num;\n@@ -1151,6 +1190,8 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_clear_pxe clear_pxe;\n \t\tstruct ice_aqc_list_caps get_cap;\n \t\tstruct ice_aqc_get_phy_caps get_phy;\n+\t\tstruct ice_aqc_set_phy_cfg set_phy;\n+\t\tstruct ice_aqc_restart_an restart_an;\n \t\tstruct ice_aqc_get_sw_cfg get_sw_conf;\n \t\tstruct ice_aqc_sw_rules sw_rules;\n \t\tstruct ice_aqc_get_topo get_topo;\n@@ -1236,6 +1277,8 @@ enum ice_adminq_opc {\n \n \t/* PHY commands */\n \tice_aqc_opc_get_phy_caps\t\t\t= 0x0600,\n+\tice_aqc_opc_set_phy_cfg\t\t\t\t= 0x0601,\n+\tice_aqc_opc_restart_an\t\t\t\t= 0x0605,\n \tice_aqc_opc_get_link_status\t\t\t= 0x0607,\n \n \t/* NVM commands */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex ed329c4b4c52..43cca9370444 100644\n--- 
a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -1275,6 +1275,201 @@ void ice_clear_pxe_mode(struct ice_hw *hw)\n \t\tice_aq_clear_pxe_mode(hw);\n }\n \n+/**\n+ * ice_aq_set_phy_cfg\n+ * @hw: pointer to the hw struct\n+ * @lport: logical port number\n+ * @cfg: structure with PHY configuration data to be set\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Set the various PHY configuration parameters supported on the Port.\n+ * One or more of the Set PHY config parameters may be ignored in an MFP\n+ * mode as the PF may not have the privilege to set some of the PHY Config\n+ * parameters. This status will be indicated by the command response (0x0601).\n+ */\n+static enum ice_status\n+ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,\n+\t\t struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_set_phy_cfg *cmd;\n+\tstruct ice_aq_desc desc;\n+\n+\tif (!cfg)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tcmd = &desc.params.set_phy;\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);\n+\tcmd->lport_num = lport;\n+\n+\treturn ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);\n+}\n+\n+/**\n+ * ice_update_link_info - update status of the HW network link\n+ * @pi: port info structure of the interested logical port\n+ */\n+static enum ice_status\n+ice_update_link_info(struct ice_port_info *pi)\n+{\n+\tstruct ice_aqc_get_phy_caps_data *pcaps;\n+\tstruct ice_phy_info *phy_info;\n+\tenum ice_status status;\n+\tstruct ice_hw *hw;\n+\n+\tif (!pi)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\thw = pi->hw;\n+\n+\tpcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);\n+\tif (!pcaps)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\n+\tphy_info = &pi->phy;\n+\tstatus = ice_aq_get_link_info(pi, true, NULL, NULL);\n+\tif (status)\n+\t\tgoto out;\n+\n+\tif (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {\n+\t\tstatus = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,\n+\t\t\t\t\t pcaps, NULL);\n+\t\tif (status)\n+\t\t\tgoto out;\n+\n+\t\tmemcpy(phy_info->link_info.module_type, &pcaps->module_type,\n+\t\t sizeof(phy_info->link_info.module_type));\n+\t}\n+out:\n+\tdevm_kfree(ice_hw_to_dev(hw), pcaps);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_set_fc\n+ * @pi: port information structure\n+ * @aq_failures: pointer to status code, specific to ice_set_fc routine\n+ * @atomic_restart: enable automatic link update\n+ *\n+ * Set the requested flow control mode.\n+ */\n+enum ice_status\n+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)\n+{\n+\tstruct ice_aqc_set_phy_cfg_data cfg = { 0 };\n+\tstruct ice_aqc_get_phy_caps_data *pcaps;\n+\tenum ice_status status;\n+\tu8 pause_mask = 0x0;\n+\tstruct ice_hw *hw;\n+\n+\tif (!pi)\n+\t\treturn ICE_ERR_PARAM;\n+\thw = pi->hw;\n+\t*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;\n+\n+\tswitch (pi->fc.req_mode) {\n+\tcase ICE_FC_FULL:\n+\t\tpause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;\n+\t\tpause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;\n+\t\tbreak;\n+\tcase ICE_FC_RX_PAUSE:\n+\t\tpause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;\n+\t\tbreak;\n+\tcase ICE_FC_TX_PAUSE:\n+\t\tpause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tpcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);\n+\tif (!pcaps)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\n+\t/* Get the current phy config */\n+\tstatus = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,\n+\t\t\t\t NULL);\n+\tif (status) {\n+\t\t*aq_failures = 
ICE_SET_FC_AQ_FAIL_GET;\n+\t\tgoto out;\n+\t}\n+\n+\t/* clear the old pause settings */\n+\tcfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |\n+\t\t\t\t ICE_AQC_PHY_EN_RX_LINK_PAUSE);\n+\t/* set the new capabilities */\n+\tcfg.caps |= pause_mask;\n+\t/* If the capabilities have changed, then set the new config */\n+\tif (cfg.caps != pcaps->caps) {\n+\t\tint retry_count, retry_max = 10;\n+\n+\t\t/* Auto restart link so settings take effect */\n+\t\tif (atomic_restart)\n+\t\t\tcfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;\n+\t\t/* Copy over all the old settings */\n+\t\tcfg.phy_type_low = pcaps->phy_type_low;\n+\t\tcfg.low_power_ctrl = pcaps->low_power_ctrl;\n+\t\tcfg.eee_cap = pcaps->eee_cap;\n+\t\tcfg.eeer_value = pcaps->eeer_value;\n+\t\tcfg.link_fec_opt = pcaps->link_fec_options;\n+\n+\t\tstatus = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);\n+\t\tif (status) {\n+\t\t\t*aq_failures = ICE_SET_FC_AQ_FAIL_SET;\n+\t\t\tgoto out;\n+\t\t}\n+\n+\t\t/* Update the link info\n+\t\t * It sometimes takes a really long time for link to\n+\t\t * come back from the atomic reset. Thus, we wait a\n+\t\t * little bit.\n+\t\t */\n+\t\tfor (retry_count = 0; retry_count < retry_max; retry_count++) {\n+\t\t\tstatus = ice_update_link_info(pi);\n+\n+\t\t\tif (!status)\n+\t\t\t\tbreak;\n+\n+\t\t\tmdelay(100);\n+\t\t}\n+\n+\t\tif (status)\n+\t\t\t*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;\n+\t}\n+\n+out:\n+\tdevm_kfree(ice_hw_to_dev(hw), pcaps);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_aq_set_link_restart_an\n+ * @pi: pointer to the port information structure\n+ * @ena_link: if true: enable link, if false: disable link\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Sets up the link and restarts the Auto-Negotiation over the link.\n+ */\n+enum ice_status\n+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,\n+\t\t\t struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_restart_an *cmd;\n+\tstruct ice_aq_desc desc;\n+\n+\tcmd = &desc.params.restart_an;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);\n+\n+\tcmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;\n+\tcmd->lport_num = pi->lport;\n+\tif (ena_link)\n+\t\tcmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;\n+\telse\n+\t\tcmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;\n+\n+\treturn ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);\n+}\n+\n /**\n * __ice_aq_get_set_rss_lut\n * @hw: pointer to the hardware structure\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex 5de509394e21..bc52b7bcc78c 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -72,6 +72,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,\n enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);\n enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);\n enum ice_status\n+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart);\n+enum ice_status\n+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,\n+\t\t\t struct ice_sq_cd *cd);\n+enum ice_status\n ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,\n \t\t struct ice_link_status *link, struct ice_sq_cd *cd);\n enum ice_status\ndiff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c\nnew file mode 100644\nindex 000000000000..0c670d6f8d6f\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c\n@@ -0,0 +1,954 @@\n+// SPDX-License-Identifier: 
GPL-2.0-only\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+/* ethtool support for ice */\n+\n+#include \"ice.h\"\n+\n+struct ice_stats {\n+\tchar stat_string[ETH_GSTRING_LEN];\n+\tint sizeof_stat;\n+\tint stat_offset;\n+};\n+\n+#define ICE_STAT(_type, _name, _stat) { \\\n+\t.stat_string = _name, \\\n+\t.sizeof_stat = FIELD_SIZEOF(_type, _stat), \\\n+\t.stat_offset = offsetof(_type, _stat) \\\n+}\n+\n+#define ICE_VSI_STAT(_name, _stat) \\\n+\t\tICE_STAT(struct ice_vsi, _name, _stat)\n+#define ICE_PF_STAT(_name, _stat) \\\n+\t\tICE_STAT(struct ice_pf, _name, _stat)\n+\n+static int ice_q_stats_len(struct net_device *netdev)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\n+\treturn ((np->vsi->num_txq + np->vsi->num_rxq) *\n+\t\t(sizeof(struct ice_q_stats) / sizeof(u64)));\n+}\n+\n+#define ICE_PF_STATS_LEN\tARRAY_SIZE(ice_gstrings_pf_stats)\n+#define ICE_VSI_STATS_LEN\tARRAY_SIZE(ice_gstrings_vsi_stats)\n+\n+#define ICE_ALL_STATS_LEN(n)\t(ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \\\n+\t\t\t\t ice_q_stats_len(n))\n+\n+static const struct ice_stats ice_gstrings_vsi_stats[] = {\n+\tICE_VSI_STAT(\"tx_unicast\", eth_stats.tx_unicast),\n+\tICE_VSI_STAT(\"rx_unicast\", eth_stats.rx_unicast),\n+\tICE_VSI_STAT(\"tx_multicast\", eth_stats.tx_multicast),\n+\tICE_VSI_STAT(\"rx_multicast\", eth_stats.rx_multicast),\n+\tICE_VSI_STAT(\"tx_broadcast\", eth_stats.tx_broadcast),\n+\tICE_VSI_STAT(\"rx_broadcast\", eth_stats.rx_broadcast),\n+\tICE_VSI_STAT(\"tx_bytes\", eth_stats.tx_bytes),\n+\tICE_VSI_STAT(\"rx_bytes\", eth_stats.rx_bytes),\n+\tICE_VSI_STAT(\"rx_discards\", eth_stats.rx_discards),\n+\tICE_VSI_STAT(\"tx_errors\", eth_stats.tx_errors),\n+\tICE_VSI_STAT(\"tx_linearize\", tx_linearize),\n+\tICE_VSI_STAT(\"rx_unknown_protocol\", eth_stats.rx_unknown_protocol),\n+\tICE_VSI_STAT(\"rx_alloc_fail\", rx_buf_failed),\n+\tICE_VSI_STAT(\"rx_pg_alloc_fail\", rx_page_failed),\n+};\n+\n+/* These PF_STATs might look like duplicates of some NETDEV_STATs,\n+ * but they aren't. This device is capable of supporting multiple\n+ * VSIs/netdevs on a single PF. 
The NETDEV_STATs are for individual\n+ * netdevs whereas the PF_STATs are for the physical function that's\n+ * hosting these netdevs.\n+ *\n+ * The PF_STATs are appended to the netdev stats only when ethtool -S\n+ * is queried on the base PF netdev.\n+ */\n+static struct ice_stats ice_gstrings_pf_stats[] = {\n+\tICE_PF_STAT(\"tx_bytes\", stats.eth.tx_bytes),\n+\tICE_PF_STAT(\"rx_bytes\", stats.eth.rx_bytes),\n+\tICE_PF_STAT(\"tx_unicast\", stats.eth.tx_unicast),\n+\tICE_PF_STAT(\"rx_unicast\", stats.eth.rx_unicast),\n+\tICE_PF_STAT(\"tx_multicast\", stats.eth.tx_multicast),\n+\tICE_PF_STAT(\"rx_multicast\", stats.eth.rx_multicast),\n+\tICE_PF_STAT(\"tx_broadcast\", stats.eth.tx_broadcast),\n+\tICE_PF_STAT(\"rx_broadcast\", stats.eth.rx_broadcast),\n+\tICE_PF_STAT(\"tx_errors\", stats.eth.tx_errors),\n+\tICE_PF_STAT(\"tx_size_64\", stats.tx_size_64),\n+\tICE_PF_STAT(\"rx_size_64\", stats.rx_size_64),\n+\tICE_PF_STAT(\"tx_size_127\", stats.tx_size_127),\n+\tICE_PF_STAT(\"rx_size_127\", stats.rx_size_127),\n+\tICE_PF_STAT(\"tx_size_255\", stats.tx_size_255),\n+\tICE_PF_STAT(\"rx_size_255\", stats.rx_size_255),\n+\tICE_PF_STAT(\"tx_size_511\", stats.tx_size_511),\n+\tICE_PF_STAT(\"rx_size_511\", stats.rx_size_511),\n+\tICE_PF_STAT(\"tx_size_1023\", stats.tx_size_1023),\n+\tICE_PF_STAT(\"rx_size_1023\", stats.rx_size_1023),\n+\tICE_PF_STAT(\"tx_size_1522\", stats.tx_size_1522),\n+\tICE_PF_STAT(\"rx_size_1522\", stats.rx_size_1522),\n+\tICE_PF_STAT(\"tx_size_big\", stats.tx_size_big),\n+\tICE_PF_STAT(\"rx_size_big\", stats.rx_size_big),\n+\tICE_PF_STAT(\"link_xon_tx\", stats.link_xon_tx),\n+\tICE_PF_STAT(\"link_xon_rx\", stats.link_xon_rx),\n+\tICE_PF_STAT(\"link_xoff_tx\", stats.link_xoff_tx),\n+\tICE_PF_STAT(\"link_xoff_rx\", stats.link_xoff_rx),\n+\tICE_PF_STAT(\"tx_dropped_link_down\", stats.tx_dropped_link_down),\n+\tICE_PF_STAT(\"rx_undersize\", stats.rx_undersize),\n+\tICE_PF_STAT(\"rx_fragments\", stats.rx_fragments),\n+\tICE_PF_STAT(\"rx_oversize\", stats.rx_oversize),\n+\tICE_PF_STAT(\"rx_jabber\", stats.rx_jabber),\n+\tICE_PF_STAT(\"rx_csum_bad\", hw_csum_rx_error),\n+\tICE_PF_STAT(\"rx_length_errors\", stats.rx_len_errors),\n+\tICE_PF_STAT(\"rx_dropped\", stats.eth.rx_discards),\n+\tICE_PF_STAT(\"rx_crc_errors\", stats.crc_errors),\n+\tICE_PF_STAT(\"illegal_bytes\", stats.illegal_bytes),\n+\tICE_PF_STAT(\"mac_local_faults\", stats.mac_local_faults),\n+\tICE_PF_STAT(\"mac_remote_faults\", stats.mac_remote_faults),\n+};\n+\n+static u32 ice_regs_dump_list[] = {\n+\tPFGEN_STATE,\n+\tPRTGEN_STATUS,\n+\tQRX_CTRL(0),\n+\tQINT_TQCTL(0),\n+\tQINT_RQCTL(0),\n+\tPFINT_OICR_ENA,\n+\tQRX_ITR(0),\n+};\n+\n+/**\n+ * ice_nvm_version_str - format the NVM version strings\n+ * @hw: ptr to the hardware info\n+ */\n+static char *ice_nvm_version_str(struct ice_hw *hw)\n+{\n+\tstatic char buf[ICE_ETHTOOL_FWVER_LEN];\n+\tu8 ver, patch;\n+\tu32 full_ver;\n+\tu16 build;\n+\n+\tfull_ver = hw->nvm.oem_ver;\n+\tver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);\n+\tbuild = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>\n+\t\t ICE_OEM_VER_BUILD_SHIFT);\n+\tpatch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);\n+\n+\tsnprintf(buf, sizeof(buf), \"%x.%02x 0x%x %d.%d.%d\",\n+\t\t (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,\n+\t\t (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,\n+\t\t hw->nvm.eetrack, ver, build, patch);\n+\n+\treturn buf;\n+}\n+\n+static void\n+ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)\n+{\n+\tstruct ice_netdev_priv *np = 
netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\n+\tstrlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));\n+\tstrlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));\n+\tstrlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),\n+\t\tsizeof(drvinfo->fw_version));\n+\tstrlcpy(drvinfo->bus_info, pci_name(pf->pdev),\n+\t\tsizeof(drvinfo->bus_info));\n+}\n+\n+static int ice_get_regs_len(struct net_device __always_unused *netdev)\n+{\n+\treturn ARRAY_SIZE(ice_regs_dump_list);\n+}\n+\n+static void\n+ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_pf *pf = np->vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 *regs_buf = (u32 *)p;\n+\tint i;\n+\n+\tregs->version = 1;\n+\n+\tfor (i = 0; i < sizeof(ice_regs_dump_list) / sizeof(u32); ++i)\n+\t\tregs_buf[i] = rd32(hw, ice_regs_dump_list[i]);\n+}\n+\n+static u32 ice_get_msglevel(struct net_device *netdev)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_pf *pf = np->vsi->back;\n+\n+#ifndef CONFIG_DYNAMIC_DEBUG\n+\tif (pf->hw.debug_mask)\n+\t\tnetdev_info(netdev, \"hw debug_mask: 0x%llX\\n\",\n+\t\t\t pf->hw.debug_mask);\n+#endif /* !CONFIG_DYNAMIC_DEBUG */\n+\n+\treturn pf->msg_enable;\n+}\n+\n+static void ice_set_msglevel(struct net_device *netdev, u32 data)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_pf *pf = np->vsi->back;\n+\n+#ifndef CONFIG_DYNAMIC_DEBUG\n+\tif (ICE_DBG_USER & data)\n+\t\tpf->hw.debug_mask = data;\n+\telse\n+\t\tpf->msg_enable = data;\n+#else\n+\tpf->msg_enable = data;\n+#endif /* !CONFIG_DYNAMIC_DEBUG */\n+}\n+\n+static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tchar *p = (char *)data;\n+\tunsigned int i;\n+\n+\tswitch (stringset) {\n+\tcase ETH_SS_STATS:\n+\t\tfor (i = 0; i < ICE_VSI_STATS_LEN; i++) {\n+\t\t\tsnprintf(p, ETH_GSTRING_LEN, \"%s\",\n+\t\t\t\t ice_gstrings_vsi_stats[i].stat_string);\n+\t\t\tp += ETH_GSTRING_LEN;\n+\t\t}\n+\n+\t\tice_for_each_txq(vsi, i) {\n+\t\t\tsnprintf(p, ETH_GSTRING_LEN,\n+\t\t\t\t \"tx-queue-%u.tx_packets\", i);\n+\t\t\tp += ETH_GSTRING_LEN;\n+\t\t\tsnprintf(p, ETH_GSTRING_LEN, \"tx-queue-%u.tx_bytes\", i);\n+\t\t\tp += ETH_GSTRING_LEN;\n+\t\t}\n+\n+\t\tice_for_each_rxq(vsi, i) {\n+\t\t\tsnprintf(p, ETH_GSTRING_LEN,\n+\t\t\t\t \"rx-queue-%u.rx_packets\", i);\n+\t\t\tp += ETH_GSTRING_LEN;\n+\t\t\tsnprintf(p, ETH_GSTRING_LEN, \"rx-queue-%u.rx_bytes\", i);\n+\t\t\tp += ETH_GSTRING_LEN;\n+\t\t}\n+\n+\t\tif (vsi->type != ICE_VSI_PF)\n+\t\t\treturn;\n+\n+\t\tfor (i = 0; i < ICE_PF_STATS_LEN; i++) {\n+\t\t\tsnprintf(p, ETH_GSTRING_LEN, \"port.%s\",\n+\t\t\t\t ice_gstrings_pf_stats[i].stat_string);\n+\t\t\tp += ETH_GSTRING_LEN;\n+\t\t}\n+\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+static int ice_get_sset_count(struct net_device *netdev, int sset)\n+{\n+\tswitch (sset) {\n+\tcase ETH_SS_STATS:\n+\t\treturn ICE_ALL_STATS_LEN(netdev);\n+\tdefault:\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+}\n+\n+static void\n+ice_get_ethtool_stats(struct net_device *netdev,\n+\t\t struct ethtool_stats __always_unused *stats, u64 *data)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_ring *ring;\n+\tunsigned int j = 0;\n+\tint i = 0;\n+\tchar *p;\n+\n+\tfor (j = 0; j < 
ICE_VSI_STATS_LEN; j++) {\n+\t\tp = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;\n+\t\tdata[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==\n+\t\t\t sizeof(u64)) ? *(u64 *)p : *(u32 *)p;\n+\t}\n+\n+\t/* populate per queue stats */\n+\trcu_read_lock();\n+\n+\tice_for_each_txq(vsi, j) {\n+\t\tring = READ_ONCE(vsi->tx_rings[j]);\n+\t\tif (!ring)\n+\t\t\tcontinue;\n+\t\tdata[i++] = ring->stats.pkts;\n+\t\tdata[i++] = ring->stats.bytes;\n+\t}\n+\n+\tice_for_each_rxq(vsi, j) {\n+\t\tring = READ_ONCE(vsi->rx_rings[j]);\n+\t\tdata[i++] = ring->stats.pkts;\n+\t\tdata[i++] = ring->stats.bytes;\n+\t}\n+\n+\trcu_read_unlock();\n+\n+\tif (vsi->type != ICE_VSI_PF)\n+\t\treturn;\n+\n+\tfor (j = 0; j < ICE_PF_STATS_LEN; j++) {\n+\t\tp = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;\n+\t\tdata[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==\n+\t\t\t sizeof(u64)) ? *(u64 *)p : *(u32 *)p;\n+\t}\n+}\n+\n+static int\n+ice_get_link_ksettings(struct net_device *netdev,\n+\t\t struct ethtool_link_ksettings *ks)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_link_status *hw_link_info;\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tbool link_up;\n+\n+\thw_link_info = &vsi->port_info->phy.link_info;\n+\tlink_up = hw_link_info->link_info & ICE_AQ_LINK_UP;\n+\n+\tethtool_link_ksettings_add_link_mode(ks, supported,\n+\t\t\t\t\t 10000baseT_Full);\n+\tethtool_link_ksettings_add_link_mode(ks, advertising,\n+\t\t\t\t\t 10000baseT_Full);\n+\n+\t/* set speed and duplex */\n+\tif (link_up) {\n+\t\tswitch (hw_link_info->link_speed) {\n+\t\tcase ICE_AQ_LINK_SPEED_100MB:\n+\t\t\tks->base.speed = SPEED_100;\n+\t\t\tbreak;\n+\t\tcase ICE_AQ_LINK_SPEED_2500MB:\n+\t\t\tks->base.speed = SPEED_2500;\n+\t\t\tbreak;\n+\t\tcase ICE_AQ_LINK_SPEED_5GB:\n+\t\t\tks->base.speed = SPEED_5000;\n+\t\t\tbreak;\n+\t\tcase ICE_AQ_LINK_SPEED_10GB:\n+\t\t\tks->base.speed = SPEED_10000;\n+\t\t\tbreak;\n+\t\tcase ICE_AQ_LINK_SPEED_25GB:\n+\t\t\tks->base.speed = SPEED_25000;\n+\t\t\tbreak;\n+\t\tcase ICE_AQ_LINK_SPEED_40GB:\n+\t\t\tks->base.speed = SPEED_40000;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tks->base.speed = SPEED_UNKNOWN;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tks->base.duplex = DUPLEX_FULL;\n+\t} else {\n+\t\tks->base.speed = SPEED_UNKNOWN;\n+\t\tks->base.duplex = DUPLEX_UNKNOWN;\n+\t}\n+\n+\t/* set autoneg settings */\n+\tks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?\n+\t\t\t AUTONEG_ENABLE : AUTONEG_DISABLE);\n+\n+\t/* set media type settings */\n+\tswitch (vsi->port_info->phy.media_type) {\n+\tcase ICE_MEDIA_FIBER:\n+\t\tethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);\n+\t\tks->base.port = PORT_FIBRE;\n+\t\tbreak;\n+\tcase ICE_MEDIA_BASET:\n+\t\tethtool_link_ksettings_add_link_mode(ks, supported, TP);\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising, TP);\n+\t\tks->base.port = PORT_TP;\n+\t\tbreak;\n+\tcase ICE_MEDIA_BACKPLANE:\n+\t\tethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);\n+\t\tethtool_link_ksettings_add_link_mode(ks, supported, Backplane);\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising,\n+\t\t\t\t\t\t Backplane);\n+\t\tks->base.port = PORT_NONE;\n+\t\tbreak;\n+\tcase ICE_MEDIA_DA:\n+\t\tethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);\n+\t\tks->base.port = PORT_DA;\n+\t\tbreak;\n+\tdefault:\n+\t\tks->base.port = PORT_OTHER;\n+\t\tbreak;\n+\t}\n+\n+\t/* flow control is symmetric and always 
supported */\n+\tethtool_link_ksettings_add_link_mode(ks, supported, Pause);\n+\n+\tswitch (vsi->port_info->fc.req_mode) {\n+\tcase ICE_FC_FULL:\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising, Pause);\n+\t\tbreak;\n+\tcase ICE_FC_TX_PAUSE:\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising,\n+\t\t\t\t\t\t Asym_Pause);\n+\t\tbreak;\n+\tcase ICE_FC_RX_PAUSE:\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising, Pause);\n+\t\tethtool_link_ksettings_add_link_mode(ks, advertising,\n+\t\t\t\t\t\t Asym_Pause);\n+\t\tbreak;\n+\tcase ICE_FC_PFC:\n+\tdefault:\n+\t\tethtool_link_ksettings_del_link_mode(ks, advertising, Pause);\n+\t\tethtool_link_ksettings_del_link_mode(ks, advertising,\n+\t\t\t\t\t\t Asym_Pause);\n+\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_get_rxnfc - command to get RX flow classification rules\n+ * @netdev: network interface device structure\n+ * @cmd: ethtool rxnfc command\n+ * @rule_locs: buffer to rturn Rx flow classification rules\n+ *\n+ * Returns Success if the command is supported.\n+ */\n+static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,\n+\t\t\t u32 __always_unused *rule_locs)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tint ret = -EOPNOTSUPP;\n+\n+\tswitch (cmd->cmd) {\n+\tcase ETHTOOL_GRXRINGS:\n+\t\tcmd->data = vsi->rss_size;\n+\t\tret = 0;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static void\n+ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\n+\tring->rx_max_pending = ICE_MAX_NUM_DESC;\n+\tring->tx_max_pending = ICE_MAX_NUM_DESC;\n+\tring->rx_pending = vsi->rx_rings[0]->count;\n+\tring->tx_pending = vsi->tx_rings[0]->count;\n+\tring->rx_mini_pending = ICE_MIN_NUM_DESC;\n+\tring->rx_mini_max_pending = 0;\n+\tring->rx_jumbo_max_pending = 0;\n+\tring->rx_jumbo_pending = 0;\n+}\n+\n+static int\n+ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)\n+{\n+\tstruct ice_ring *tx_rings = NULL, *rx_rings = NULL;\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\tint i, timeout = 50, err = 0;\n+\tu32 new_rx_cnt, new_tx_cnt;\n+\n+\tif (ring->tx_pending > ICE_MAX_NUM_DESC ||\n+\t ring->tx_pending < ICE_MIN_NUM_DESC ||\n+\t ring->rx_pending > ICE_MAX_NUM_DESC ||\n+\t ring->rx_pending < ICE_MIN_NUM_DESC) {\n+\t\tnetdev_err(netdev, \"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\\n\",\n+\t\t\t ring->tx_pending, ring->rx_pending,\n+\t\t\t ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tnew_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);\n+\tnew_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);\n+\n+\t/* if nothing to do return success */\n+\tif (new_tx_cnt == vsi->tx_rings[0]->count &&\n+\t new_rx_cnt == vsi->rx_rings[0]->count) {\n+\t\tnetdev_dbg(netdev, \"Nothing to change, descriptor count is same as requested\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\twhile (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {\n+\t\ttimeout--;\n+\t\tif (!timeout)\n+\t\t\treturn -EBUSY;\n+\t\tusleep_range(1000, 2000);\n+\t}\n+\n+\t/* set for the next time the netdev is started */\n+\tif (!netif_running(vsi->netdev)) {\n+\t\tfor (i = 0; i < vsi->alloc_txq; i++)\n+\t\t\tvsi->tx_rings[i]->count = new_tx_cnt;\n+\t\tfor (i = 0; i < vsi->alloc_rxq; i++)\n+\t\t\tvsi->rx_rings[i]->count = 
new_rx_cnt;\n+\t\tnetdev_dbg(netdev, \"Link is down, descriptor count change happens when link is brought up\\n\");\n+\t\tgoto done;\n+\t}\n+\n+\tif (new_tx_cnt == vsi->tx_rings[0]->count)\n+\t\tgoto process_rx;\n+\n+\t/* alloc updated Tx resources */\n+\tnetdev_info(netdev, \"Changing Tx descriptor count from %d to %d\\n\",\n+\t\t vsi->tx_rings[0]->count, new_tx_cnt);\n+\n+\ttx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,\n+\t\t\t\tsizeof(struct ice_ring), GFP_KERNEL);\n+\tif (!tx_rings) {\n+\t\terr = -ENOMEM;\n+\t\tgoto done;\n+\t}\n+\n+\tfor (i = 0; i < vsi->num_txq; i++) {\n+\t\t/* clone ring and setup updated count */\n+\t\ttx_rings[i] = *vsi->tx_rings[i];\n+\t\ttx_rings[i].count = new_tx_cnt;\n+\t\ttx_rings[i].desc = NULL;\n+\t\ttx_rings[i].tx_buf = NULL;\n+\t\terr = ice_setup_tx_ring(&tx_rings[i]);\n+\t\tif (err) {\n+\t\t\twhile (i) {\n+\t\t\t\ti--;\n+\t\t\t\tice_clean_tx_ring(&tx_rings[i]);\n+\t\t\t}\n+\t\t\tdevm_kfree(&pf->pdev->dev, tx_rings);\n+\t\t\tgoto done;\n+\t\t}\n+\t}\n+\n+process_rx:\n+\tif (new_rx_cnt == vsi->rx_rings[0]->count)\n+\t\tgoto process_link;\n+\n+\t/* alloc updated Rx resources */\n+\tnetdev_info(netdev, \"Changing Rx descriptor count from %d to %d\\n\",\n+\t\t vsi->rx_rings[0]->count, new_rx_cnt);\n+\n+\trx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,\n+\t\t\t\tsizeof(struct ice_ring), GFP_KERNEL);\n+\tif (!rx_rings) {\n+\t\terr = -ENOMEM;\n+\t\tgoto done;\n+\t}\n+\n+\tfor (i = 0; i < vsi->num_rxq; i++) {\n+\t\t/* clone ring and setup updated count */\n+\t\trx_rings[i] = *vsi->rx_rings[i];\n+\t\trx_rings[i].count = new_rx_cnt;\n+\t\trx_rings[i].desc = NULL;\n+\t\trx_rings[i].rx_buf = NULL;\n+\t\t/* this is to allow wr32 to have something to write to\n+\t\t * during early allocation of Rx buffers\n+\t\t */\n+\t\trx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;\n+\n+\t\terr = ice_setup_rx_ring(&rx_rings[i]);\n+\t\tif (err)\n+\t\t\tgoto rx_unwind;\n+\n+\t\t/* allocate Rx buffers */\n+\t\terr = ice_alloc_rx_bufs(&rx_rings[i],\n+\t\t\t\t\tICE_DESC_UNUSED(&rx_rings[i]));\n+rx_unwind:\n+\t\tif (err) {\n+\t\t\twhile (i) {\n+\t\t\t\ti--;\n+\t\t\t\tice_free_rx_ring(&rx_rings[i]);\n+\t\t\t}\n+\t\t\tdevm_kfree(&pf->pdev->dev, rx_rings);\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto free_tx;\n+\t\t}\n+\t}\n+\n+process_link:\n+\t/* Bring interface down, copy in the new ring info, then restore the\n+\t * interface. 
if VSI is up, bring it down and then back up\n+\t */\n+\tif (!test_and_set_bit(__ICE_DOWN, vsi->state)) {\n+\t\tice_down(vsi);\n+\n+\t\tif (tx_rings) {\n+\t\t\tfor (i = 0; i < vsi->alloc_txq; i++) {\n+\t\t\t\tice_free_tx_ring(vsi->tx_rings[i]);\n+\t\t\t\t*vsi->tx_rings[i] = tx_rings[i];\n+\t\t\t}\n+\t\t\tdevm_kfree(&pf->pdev->dev, tx_rings);\n+\t\t}\n+\n+\t\tif (rx_rings) {\n+\t\t\tfor (i = 0; i < vsi->alloc_rxq; i++) {\n+\t\t\t\tice_free_rx_ring(vsi->rx_rings[i]);\n+\t\t\t\t/* copy the real tail offset */\n+\t\t\t\trx_rings[i].tail = vsi->rx_rings[i]->tail;\n+\t\t\t\t/* this is to fake out the allocation routine\n+\t\t\t\t * into thinking it has to realloc everything\n+\t\t\t\t * but the recycling logic will let us re-use\n+\t\t\t\t * the buffers allocated above\n+\t\t\t\t */\n+\t\t\t\trx_rings[i].next_to_use = 0;\n+\t\t\t\trx_rings[i].next_to_clean = 0;\n+\t\t\t\trx_rings[i].next_to_alloc = 0;\n+\t\t\t\t*vsi->rx_rings[i] = rx_rings[i];\n+\t\t\t}\n+\t\t\tdevm_kfree(&pf->pdev->dev, rx_rings);\n+\t\t}\n+\n+\t\tice_up(vsi);\n+\t}\n+\tgoto done;\n+\n+free_tx:\n+\t/* error cleanup if the Rx allocations failed after getting Tx */\n+\tif (tx_rings) {\n+\t\tfor (i = 0; i < vsi->alloc_txq; i++)\n+\t\t\tice_free_tx_ring(&tx_rings[i]);\n+\t\tdevm_kfree(&pf->pdev->dev, tx_rings);\n+\t}\n+\n+done:\n+\tclear_bit(__ICE_CFG_BUSY, pf->state);\n+\treturn err;\n+}\n+\n+static int ice_nway_reset(struct net_device *netdev)\n+{\n+\t/* restart autonegotiation */\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_link_status *hw_link_info;\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_port_info *pi;\n+\tenum ice_status status;\n+\tbool link_up;\n+\n+\tpi = vsi->port_info;\n+\thw_link_info = &pi->phy.link_info;\n+\tlink_up = hw_link_info->link_info & ICE_AQ_LINK_UP;\n+\n+\tstatus = ice_aq_set_link_restart_an(pi, link_up, NULL);\n+\tif (status) {\n+\t\tnetdev_info(netdev, \"link restart failed, err %d aq_err %d\\n\",\n+\t\t\t status, pi->hw->adminq.sq_last_status);\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_get_pauseparam - Get Flow Control status\n+ * @netdev: network interface device structure\n+ * @pause: ethernet pause (flow control) parameters\n+ */\n+static void\n+ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_port_info *pi;\n+\n+\tpi = np->vsi->port_info;\n+\tpause->autoneg =\n+\t\t((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?\n+\t\t AUTONEG_ENABLE : AUTONEG_DISABLE);\n+\n+\tif (pi->fc.current_mode == ICE_FC_RX_PAUSE) {\n+\t\tpause->rx_pause = 1;\n+\t} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {\n+\t\tpause->tx_pause = 1;\n+\t} else if (pi->fc.current_mode == ICE_FC_FULL) {\n+\t\tpause->rx_pause = 1;\n+\t\tpause->tx_pause = 1;\n+\t}\n+}\n+\n+/**\n+ * ice_set_pauseparam - Set Flow Control parameter\n+ * @netdev: network interface device structure\n+ * @pause: return tx/rx flow control status\n+ */\n+static int\n+ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_link_status *hw_link_info;\n+\tstruct ice_pf *pf = np->vsi->back;\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tstruct ice_port_info *pi;\n+\tenum ice_status status;\n+\tu8 aq_failures;\n+\tbool link_up;\n+\tint err = 0;\n+\n+\tpi = vsi->port_info;\n+\thw_link_info = &pi->phy.link_info;\n+\tlink_up = hw_link_info->link_info & ICE_AQ_LINK_UP;\n+\n+\t/* Changing the 
port's flow control is not supported if this isn't the\n+\t * PF VSI\n+\t */\n+\tif (vsi->type != ICE_VSI_PF) {\n+\t\tnetdev_info(netdev, \"Changing flow control parameters only supported for PF VSI\\n\");\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\tif (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {\n+\t\tnetdev_info(netdev, \"To change autoneg please use: ethtool -s <dev> autoneg <on|off>\\n\");\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t/* If we have link and don't have autoneg */\n+\tif (!test_bit(__ICE_DOWN, pf->state) &&\n+\t !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {\n+\t\t/* Send message that it might not necessarily work*/\n+\t\tnetdev_info(netdev, \"Autoneg did not complete so changing settings may not result in an actual change.\\n\");\n+\t}\n+\n+\tif (pause->rx_pause && pause->tx_pause)\n+\t\tpi->fc.req_mode = ICE_FC_FULL;\n+\telse if (pause->rx_pause && !pause->tx_pause)\n+\t\tpi->fc.req_mode = ICE_FC_RX_PAUSE;\n+\telse if (!pause->rx_pause && pause->tx_pause)\n+\t\tpi->fc.req_mode = ICE_FC_TX_PAUSE;\n+\telse if (!pause->rx_pause && !pause->tx_pause)\n+\t\tpi->fc.req_mode = ICE_FC_NONE;\n+\telse\n+\t\treturn -EINVAL;\n+\n+\t/* Tell the OS link is going down, the link will go back up when fw\n+\t * says it is ready asynchronously\n+\t */\n+\tice_print_link_msg(vsi, false);\n+\tnetif_carrier_off(netdev);\n+\tnetif_tx_stop_all_queues(netdev);\n+\n+\t/* Set the FC mode and only restart AN if link is up */\n+\tstatus = ice_set_fc(pi, &aq_failures, link_up);\n+\n+\tif (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {\n+\t\tnetdev_info(netdev, \"Set fc failed on the get_phy_capabilities call with err %d aq_err %d\\n\",\n+\t\t\t status, hw->adminq.sq_last_status);\n+\t\terr = -EAGAIN;\n+\t} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {\n+\t\tnetdev_info(netdev, \"Set fc failed on the set_phy_config call with err %d aq_err %d\\n\",\n+\t\t\t status, hw->adminq.sq_last_status);\n+\t\terr = -EAGAIN;\n+\t} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {\n+\t\tnetdev_info(netdev, \"Set fc failed on the get_link_info call with err %d aq_err %d\\n\",\n+\t\t\t status, hw->adminq.sq_last_status);\n+\t\terr = -EAGAIN;\n+\t}\n+\n+\tif (!test_bit(__ICE_DOWN, pf->state)) {\n+\t\t/* Give it a little more time to try to come back */\n+\t\tmsleep(75);\n+\t\tif (!test_bit(__ICE_DOWN, pf->state))\n+\t\t\treturn ice_nway_reset(netdev);\n+\t}\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_get_rxfh_key_size - get the RSS hash key size\n+ * @netdev: network interface device structure\n+ *\n+ * Returns the table size.\n+ */\n+static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)\n+{\n+\treturn ICE_VSIQF_HKEY_ARRAY_SIZE;\n+}\n+\n+/**\n+ * ice_get_rxfh_indir_size - get the rx flow hash indirection table size\n+ * @netdev: network interface device structure\n+ *\n+ * Returns the table size.\n+ */\n+static u32 ice_get_rxfh_indir_size(struct net_device *netdev)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\n+\treturn np->vsi->rss_table_size;\n+}\n+\n+/**\n+ * ice_get_rxfh - get the rx flow hash indirection table\n+ * @netdev: network interface device structure\n+ * @indir: indirection table\n+ * @key: hash key\n+ * @hfunc: hash function\n+ *\n+ * Reads the indirection table directly from the hardware.\n+ */\n+static int\n+ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\tint ret = 0, i;\n+\tu8 *lut;\n+\n+\tif 
(hfunc)\n+\t\t*hfunc = ETH_RSS_HASH_TOP;\n+\n+\tif (!indir)\n+\t\treturn 0;\n+\n+\tif (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {\n+\t\t/* RSS not supported return error here */\n+\t\tnetdev_warn(netdev, \"RSS is not configured on this VSI!\\n\");\n+\t\treturn -EIO;\n+\t}\n+\n+\tlut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);\n+\tif (!lut)\n+\t\treturn -ENOMEM;\n+\n+\tif (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {\n+\t\tret = -EIO;\n+\t\tgoto out;\n+\t}\n+\n+\tfor (i = 0; i < vsi->rss_table_size; i++)\n+\t\tindir[i] = (u32)(lut[i]);\n+\n+out:\n+\tdevm_kfree(&pf->pdev->dev, lut);\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_set_rxfh - set the rx flow hash indirection table\n+ * @netdev: network interface device structure\n+ * @indir: indirection table\n+ * @key: hash key\n+ * @hfunc: hash function\n+ *\n+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise\n+ * returns 0 after programming the table.\n+ */\n+static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,\n+\t\t\tconst u8 *key, const u8 hfunc)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\tu8 *seed = NULL;\n+\n+\tif (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)\n+\t\treturn -EOPNOTSUPP;\n+\n+\tif (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {\n+\t\t/* RSS not supported return error here */\n+\t\tnetdev_warn(netdev, \"RSS is not configured on this VSI!\\n\");\n+\t\treturn -EIO;\n+\t}\n+\n+\tif (key) {\n+\t\tif (!vsi->rss_hkey_user) {\n+\t\t\tvsi->rss_hkey_user =\n+\t\t\t\tdevm_kzalloc(&pf->pdev->dev,\n+\t\t\t\t\t ICE_VSIQF_HKEY_ARRAY_SIZE,\n+\t\t\t\t\t GFP_KERNEL);\n+\t\t\tif (!vsi->rss_hkey_user)\n+\t\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\tmemcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);\n+\t\tseed = vsi->rss_hkey_user;\n+\t}\n+\n+\tif (!vsi->rss_lut_user) {\n+\t\tvsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,\n+\t\t\t\t\t\t vsi->rss_table_size,\n+\t\t\t\t\t\t GFP_KERNEL);\n+\t\tif (!vsi->rss_lut_user)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Each 32 bits pointed by 'indir' is stored with a lut entry */\n+\tif (indir) {\n+\t\tint i;\n+\n+\t\tfor (i = 0; i < vsi->rss_table_size; i++)\n+\t\t\tvsi->rss_lut_user[i] = (u8)(indir[i]);\n+\t} else {\n+\t\tice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,\n+\t\t\t\t vsi->rss_size);\n+\t}\n+\n+\tif (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))\n+\t\treturn -EIO;\n+\n+\treturn 0;\n+}\n+\n+static const struct ethtool_ops ice_ethtool_ops = {\n+\t.get_link_ksettings\t= ice_get_link_ksettings,\n+\t.get_drvinfo = ice_get_drvinfo,\n+\t.get_regs_len = ice_get_regs_len,\n+\t.get_regs = ice_get_regs,\n+\t.get_msglevel = ice_get_msglevel,\n+\t.set_msglevel = ice_set_msglevel,\n+\t.get_link\t\t= ethtool_op_get_link,\n+\t.get_strings\t\t= ice_get_strings,\n+\t.get_ethtool_stats = ice_get_ethtool_stats,\n+\t.get_sset_count\t\t= ice_get_sset_count,\n+\t.get_rxnfc\t\t= ice_get_rxnfc,\n+\t.get_ringparam\t\t= ice_get_ringparam,\n+\t.set_ringparam\t\t= ice_set_ringparam,\n+\t.nway_reset\t\t= ice_nway_reset,\n+\t.get_pauseparam\t\t= ice_get_pauseparam,\n+\t.set_pauseparam\t\t= ice_set_pauseparam,\n+\t.get_rxfh_key_size\t= ice_get_rxfh_key_size,\n+\t.get_rxfh_indir_size\t= ice_get_rxfh_indir_size,\n+\t.get_rxfh\t\t= ice_get_rxfh,\n+\t.set_rxfh\t\t= ice_set_rxfh,\n+};\n+\n+/**\n+ * ice_set_ethtool_ops - setup netdev ethtool ops\n+ * @netdev: network interface device structure\n+ *\n+ * setup netdev ethtool ops with ice specific 
ops\n+ */\n+void ice_set_ethtool_ops(struct net_device *netdev)\n+{\n+\tnetdev->ethtool_ops = &ice_ethtool_ops;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\nindex 6303489866a4..0d24ec3ca975 100644\n--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n@@ -108,6 +108,8 @@\n #define PFGEN_CTRL\t\t\t0x00091000\n #define PFGEN_CTRL_PFSWR_S\t\t0\n #define PFGEN_CTRL_PFSWR_M\t\tBIT(PFGEN_CTRL_PFSWR_S)\n+#define PFGEN_STATE\t\t\t0x00088000\n+#define PRTGEN_STATUS\t\t\t0x000B8100\n #define PFHMC_ERRORDATA\t\t\t0x00520500\n #define PFHMC_ERRORINFO\t\t\t0x00520400\n #define GLINT_DYN_CTL(_INT)\t\t(0x00160000 + ((_INT) * 4))\n@@ -179,6 +181,7 @@\n #define QRX_CTRL_QENA_REQ_M\t\tBIT(QRX_CTRL_QENA_REQ_S)\n #define QRX_CTRL_QENA_STAT_S\t\t2\n #define QRX_CTRL_QENA_STAT_M\t\tBIT(QRX_CTRL_QENA_STAT_S)\n+#define QRX_ITR(_QRX)\t\t\t(0x00292000 + ((_QRX) * 4))\n #define QRX_TAIL(_QRX)\t\t\t(0x00290000 + ((_QRX) * 4))\n #define GLNVM_FLA\t\t\t0x000B6108\n #define GLNVM_FLA_LOCKED_S\t\t6\n@@ -194,5 +197,82 @@\n #define PF_FUNC_RID\t\t\t0x0009E880\n #define PF_FUNC_RID_FUNC_NUM_S\t\t0\n #define PF_FUNC_RID_FUNC_NUM_M\t\tICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)\n+#define GLPRT_BPRCH(_i)\t\t\t(0x00381384 + ((_i) * 8))\n+#define GLPRT_BPRCL(_i)\t\t\t(0x00381380 + ((_i) * 8))\n+#define GLPRT_BPTCH(_i)\t\t\t(0x00381244 + ((_i) * 8))\n+#define GLPRT_BPTCL(_i)\t\t\t(0x00381240 + ((_i) * 8))\n+#define GLPRT_CRCERRS(_i)\t\t(0x00380100 + ((_i) * 8))\n+#define GLPRT_GORCH(_i)\t\t\t(0x00380004 + ((_i) * 8))\n+#define GLPRT_GORCL(_i)\t\t\t(0x00380000 + ((_i) * 8))\n+#define GLPRT_GOTCH(_i)\t\t\t(0x00380B44 + ((_i) * 8))\n+#define GLPRT_GOTCL(_i)\t\t\t(0x00380B40 + ((_i) * 8))\n+#define GLPRT_ILLERRC(_i)\t\t(0x003801C0 + ((_i) * 8))\n+#define GLPRT_LXOFFRXC(_i)\t\t(0x003802C0 + ((_i) * 8))\n+#define GLPRT_LXOFFTXC(_i)\t\t(0x00381180 + ((_i) * 8))\n+#define GLPRT_LXONRXC(_i)\t\t(0x00380280 + ((_i) * 8))\n+#define GLPRT_LXONTXC(_i)\t\t(0x00381140 + ((_i) * 8))\n+#define GLPRT_MLFC(_i)\t\t\t(0x00380040 + ((_i) * 8))\n+#define GLPRT_MPRCH(_i)\t\t\t(0x00381344 + ((_i) * 8))\n+#define GLPRT_MPRCL(_i)\t\t\t(0x00381340 + ((_i) * 8))\n+#define GLPRT_MPTCH(_i)\t\t\t(0x00381204 + ((_i) * 8))\n+#define GLPRT_MPTCL(_i)\t\t\t(0x00381200 + ((_i) * 8))\n+#define GLPRT_MRFC(_i)\t\t\t(0x00380080 + ((_i) * 8))\n+#define GLPRT_PRC1023H(_i)\t\t(0x00380A04 + ((_i) * 8))\n+#define GLPRT_PRC1023L(_i)\t\t(0x00380A00 + ((_i) * 8))\n+#define GLPRT_PRC127H(_i)\t\t(0x00380944 + ((_i) * 8))\n+#define GLPRT_PRC127L(_i)\t\t(0x00380940 + ((_i) * 8))\n+#define GLPRT_PRC1522H(_i)\t\t(0x00380A44 + ((_i) * 8))\n+#define GLPRT_PRC1522L(_i)\t\t(0x00380A40 + ((_i) * 8))\n+#define GLPRT_PRC255H(_i)\t\t(0x00380984 + ((_i) * 8))\n+#define GLPRT_PRC255L(_i)\t\t(0x00380980 + ((_i) * 8))\n+#define GLPRT_PRC511H(_i)\t\t(0x003809C4 + ((_i) * 8))\n+#define GLPRT_PRC511L(_i)\t\t(0x003809C0 + ((_i) * 8))\n+#define GLPRT_PRC64H(_i)\t\t(0x00380904 + ((_i) * 8))\n+#define GLPRT_PRC64L(_i)\t\t(0x00380900 + ((_i) * 8))\n+#define GLPRT_PRC9522H(_i)\t\t(0x00380A84 + ((_i) * 8))\n+#define GLPRT_PRC9522L(_i)\t\t(0x00380A80 + ((_i) * 8))\n+#define GLPRT_PTC1023H(_i)\t\t(0x00380C84 + ((_i) * 8))\n+#define GLPRT_PTC1023L(_i)\t\t(0x00380C80 + ((_i) * 8))\n+#define GLPRT_PTC127H(_i)\t\t(0x00380BC4 + ((_i) * 8))\n+#define GLPRT_PTC127L(_i)\t\t(0x00380BC0 + ((_i) * 8))\n+#define GLPRT_PTC1522H(_i)\t\t(0x00380CC4 + ((_i) * 8))\n+#define GLPRT_PTC1522L(_i)\t\t(0x00380CC0 + 
((_i) * 8))\n+#define GLPRT_PTC255H(_i)\t\t(0x00380C04 + ((_i) * 8))\n+#define GLPRT_PTC255L(_i)\t\t(0x00380C00 + ((_i) * 8))\n+#define GLPRT_PTC511H(_i)\t\t(0x00380C44 + ((_i) * 8))\n+#define GLPRT_PTC511L(_i)\t\t(0x00380C40 + ((_i) * 8))\n+#define GLPRT_PTC64H(_i)\t\t(0x00380B84 + ((_i) * 8))\n+#define GLPRT_PTC64L(_i)\t\t(0x00380B80 + ((_i) * 8))\n+#define GLPRT_PTC9522H(_i)\t\t(0x00380D04 + ((_i) * 8))\n+#define GLPRT_PTC9522L(_i)\t\t(0x00380D00 + ((_i) * 8))\n+#define GLPRT_RFC(_i)\t\t\t(0x00380AC0 + ((_i) * 8))\n+#define GLPRT_RJC(_i)\t\t\t(0x00380B00 + ((_i) * 8))\n+#define GLPRT_RLEC(_i)\t\t\t(0x00380140 + ((_i) * 8))\n+#define GLPRT_ROC(_i)\t\t\t(0x00380240 + ((_i) * 8))\n+#define GLPRT_RUC(_i)\t\t\t(0x00380200 + ((_i) * 8))\n+#define GLPRT_TDOLD(_i)\t\t\t(0x00381280 + ((_i) * 8))\n+#define GLPRT_UPRCH(_i)\t\t\t(0x00381304 + ((_i) * 8))\n+#define GLPRT_UPRCL(_i)\t\t\t(0x00381300 + ((_i) * 8))\n+#define GLPRT_UPTCH(_i)\t\t\t(0x003811C4 + ((_i) * 8))\n+#define GLPRT_UPTCL(_i)\t\t\t(0x003811C0 + ((_i) * 8))\n+#define GLV_BPRCH(_i)\t\t\t(0x003B6004 + ((_i) * 8))\n+#define GLV_BPRCL(_i)\t\t\t(0x003B6000 + ((_i) * 8))\n+#define GLV_BPTCH(_i)\t\t\t(0x0030E004 + ((_i) * 8))\n+#define GLV_BPTCL(_i)\t\t\t(0x0030E000 + ((_i) * 8))\n+#define GLV_GORCH(_i)\t\t\t(0x003B0004 + ((_i) * 8))\n+#define GLV_GORCL(_i)\t\t\t(0x003B0000 + ((_i) * 8))\n+#define GLV_GOTCH(_i)\t\t\t(0x00300004 + ((_i) * 8))\n+#define GLV_GOTCL(_i)\t\t\t(0x00300000 + ((_i) * 8))\n+#define GLV_MPRCH(_i)\t\t\t(0x003B4004 + ((_i) * 8))\n+#define GLV_MPRCL(_i)\t\t\t(0x003B4000 + ((_i) * 8))\n+#define GLV_MPTCH(_i)\t\t\t(0x0030C004 + ((_i) * 8))\n+#define GLV_MPTCL(_i)\t\t\t(0x0030C000 + ((_i) * 8))\n+#define GLV_RDPC(_i)\t\t\t(0x00294C04 + ((_i) * 4))\n+#define GLV_TEPC(_VSI)\t\t\t(0x00312000 + ((_VSI) * 4))\n+#define GLV_UPRCH(_i)\t\t\t(0x003B2004 + ((_i) * 8))\n+#define GLV_UPRCL(_i)\t\t\t(0x003B2000 + ((_i) * 8))\n+#define GLV_UPTCH(_i)\t\t\t(0x0030A004 + ((_i) * 8))\n+#define GLV_UPTCL(_i)\t\t\t(0x0030A000 + ((_i) * 8))\n+#define VSIQF_HKEY_MAX_INDEX\t\t12\n \n #endif /* _ICE_HW_AUTOGEN_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex f966d783ec63..1dbe510e80fa 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -23,7 +23,7 @@\n \n #define DRV_VERSION\t\"ice-0.0.1-k\"\n #define DRV_SUMMARY\t\"Intel(R) Ethernet Connection E800 Series Linux Driver\"\n-static const char ice_drv_ver[] = DRV_VERSION;\n+const char ice_drv_ver[] = DRV_VERSION;\n static const char ice_driver_string[] = DRV_SUMMARY;\n static const char ice_copyright[] = \"Copyright (c) 2018, Intel Corporation.\";\n \n@@ -44,6 +44,8 @@ static struct workqueue_struct *ice_wq;\n static const struct net_device_ops ice_netdev_ops;\n \n static int ice_vsi_release(struct ice_vsi *vsi);\n+static void ice_update_vsi_stats(struct ice_vsi *vsi);\n+static void ice_update_pf_stats(struct ice_pf *pf);\n \n /**\n * ice_get_free_slot - get the next non-NULL location index in array\n@@ -228,12 +230,41 @@ static void ice_free_fltr_list(struct device *dev, struct list_head *h)\n \t}\n }\n \n+/**\n+ * ice_watchdog_subtask - periodic tasks not using event driven scheduling\n+ * @pf: board private structure\n+ */\n+static void ice_watchdog_subtask(struct ice_pf *pf)\n+{\n+\tint i;\n+\n+\t/* if interface is down do nothing */\n+\tif (test_bit(__ICE_DOWN, pf->state) ||\n+\t test_bit(__ICE_CFG_BUSY, pf->state))\n+\t\treturn;\n+\n+\t/* make sure we don't do these things 
too often */\n+\tif (time_before(jiffies,\n+\t\t\tpf->serv_tmr_prev + pf->serv_tmr_period))\n+\t\treturn;\n+\n+\tpf->serv_tmr_prev = jiffies;\n+\n+\t/* Update the stats for active netdevs so the network stack\n+\t * can look at updated numbers whenever it cares to\n+\t */\n+\tice_update_pf_stats(pf);\n+\tfor (i = 0; i < pf->num_alloc_vsi; i++)\n+\t\tif (pf->vsi[i] && pf->vsi[i]->netdev)\n+\t\t\tice_update_vsi_stats(pf->vsi[i]);\n+}\n+\n /**\n * ice_print_link_msg - print link up or down message\n * @vsi: the VSI whose link status is being queried\n * @isup: boolean for if the link is now up or down\n */\n-static void ice_print_link_msg(struct ice_vsi *vsi, bool isup)\n+void ice_print_link_msg(struct ice_vsi *vsi, bool isup)\n {\n \tconst char *speed;\n \tconst char *fc;\n@@ -466,6 +497,7 @@ static void ice_service_task(struct work_struct *work)\n \tunsigned long start_time = jiffies;\n \n \t/* subtasks */\n+\tice_watchdog_subtask(pf);\n \tice_clean_adminq_subtask(pf);\n \n \t/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */\n@@ -1777,6 +1809,8 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)\n \t/* setup watchdog timeout value to be 5 second */\n \tnetdev->watchdog_timeo = 5 * HZ;\n \n+\tice_set_ethtool_ops(netdev);\n+\n \tnetdev->min_mtu = ETH_MIN_MTU;\n \tnetdev->max_mtu = ICE_MAX_MTU;\n \n@@ -3473,6 +3507,434 @@ static int ice_up_complete(struct ice_vsi *vsi)\n \treturn err;\n }\n \n+/**\n+ * ice_up - Bring the connection back up after being down\n+ * @vsi: VSI being configured\n+ */\n+int ice_up(struct ice_vsi *vsi)\n+{\n+\tint err;\n+\n+\terr = ice_vsi_cfg(vsi);\n+\tif (!err)\n+\t\terr = ice_up_complete(vsi);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring\n+ * @ring: Tx or Rx ring to read stats from\n+ * @pkts: packets stats counter\n+ * @bytes: bytes stats counter\n+ *\n+ * This function fetches stats from the ring considering the atomic operations\n+ * that needs to be performed to read u64 values in 32 bit machine.\n+ */\n+static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,\n+\t\t\t\t\t u64 *bytes)\n+{\n+\tunsigned int start;\n+\t*pkts = 0;\n+\t*bytes = 0;\n+\n+\tif (!ring)\n+\t\treturn;\n+\tdo {\n+\t\tstart = u64_stats_fetch_begin_irq(&ring->syncp);\n+\t\t*pkts = ring->stats.pkts;\n+\t\t*bytes = ring->stats.bytes;\n+\t} while (u64_stats_fetch_retry_irq(&ring->syncp, start));\n+}\n+\n+/**\n+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values\n+ * @hw: ptr to the hardware info\n+ * @hireg: high 32 bit HW register to read from\n+ * @loreg: low 32 bit HW register to read from\n+ * @prev_stat_loaded: bool to specify if previous stats are loaded\n+ * @prev_stat: ptr to previous loaded stat value\n+ * @cur_stat: ptr to current stat value\n+ */\n+static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,\n+\t\t\t bool prev_stat_loaded, u64 *prev_stat,\n+\t\t\t u64 *cur_stat)\n+{\n+\tu64 new_data;\n+\n+\tnew_data = rd32(hw, loreg);\n+\tnew_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;\n+\n+\t/* device stats are not reset at PFR, they likely will not be zeroed\n+\t * when the driver starts. 
So save the first values read and use them as\n+\t * offsets to be subtracted from the raw values in order to report stats\n+\t * that count from zero.\n+\t */\n+\tif (!prev_stat_loaded)\n+\t\t*prev_stat = new_data;\n+\tif (likely(new_data >= *prev_stat))\n+\t\t*cur_stat = new_data - *prev_stat;\n+\telse\n+\t\t/* to manage the potential roll-over */\n+\t\t*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;\n+\t*cur_stat &= 0xFFFFFFFFFFULL;\n+}\n+\n+/**\n+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values\n+ * @hw: ptr to the hardware info\n+ * @reg: HW register to read from\n+ * @prev_stat_loaded: bool to specify if previous stats are loaded\n+ * @prev_stat: ptr to previous loaded stat value\n+ * @cur_stat: ptr to current stat value\n+ */\n+static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,\n+\t\t\t u64 *prev_stat, u64 *cur_stat)\n+{\n+\tu32 new_data;\n+\n+\tnew_data = rd32(hw, reg);\n+\n+\t/* device stats are not reset at PFR, they likely will not be zeroed\n+\t * when the driver starts. So save the first values read and use them as\n+\t * offsets to be subtracted from the raw values in order to report stats\n+\t * that count from zero.\n+\t */\n+\tif (!prev_stat_loaded)\n+\t\t*prev_stat = new_data;\n+\tif (likely(new_data >= *prev_stat))\n+\t\t*cur_stat = new_data - *prev_stat;\n+\telse\n+\t\t/* to manage the potential roll-over */\n+\t\t*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;\n+}\n+\n+/**\n+ * ice_update_eth_stats - Update VSI-specific ethernet statistics counters\n+ * @vsi: the VSI to be updated\n+ */\n+static void ice_update_eth_stats(struct ice_vsi *vsi)\n+{\n+\tstruct ice_eth_stats *prev_es, *cur_es;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\tu16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */\n+\n+\tprev_es = &vsi->eth_stats_prev;\n+\tcur_es = &vsi->eth_stats;\n+\n+\tice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->rx_bytes,\n+\t\t\t &cur_es->rx_bytes);\n+\n+\tice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->rx_unicast,\n+\t\t\t &cur_es->rx_unicast);\n+\n+\tice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->rx_multicast,\n+\t\t\t &cur_es->rx_multicast);\n+\n+\tice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->rx_broadcast,\n+\t\t\t &cur_es->rx_broadcast);\n+\n+\tice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,\n+\t\t\t &prev_es->rx_discards, &cur_es->rx_discards);\n+\n+\tice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->tx_bytes,\n+\t\t\t &cur_es->tx_bytes);\n+\n+\tice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->tx_unicast,\n+\t\t\t &cur_es->tx_unicast);\n+\n+\tice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->tx_multicast,\n+\t\t\t &cur_es->tx_multicast);\n+\n+\tice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),\n+\t\t\t vsi->stat_offsets_loaded, &prev_es->tx_broadcast,\n+\t\t\t &cur_es->tx_broadcast);\n+\n+\tice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,\n+\t\t\t &prev_es->tx_errors, &cur_es->tx_errors);\n+\n+\tvsi->stat_offsets_loaded = true;\n+}\n+\n+/**\n+ * ice_update_vsi_ring_stats - Update VSI stats counters\n+ * @vsi: the VSI to be updated\n+ */\n+static 
void ice_update_vsi_ring_stats(struct ice_vsi *vsi)\n+{\n+\tstruct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;\n+\tstruct ice_ring *ring;\n+\tu64 pkts, bytes;\n+\tint i;\n+\n+\t/* reset netdev stats */\n+\tvsi_stats->tx_packets = 0;\n+\tvsi_stats->tx_bytes = 0;\n+\tvsi_stats->rx_packets = 0;\n+\tvsi_stats->rx_bytes = 0;\n+\n+\t/* reset non-netdev (extended) stats */\n+\tvsi->tx_restart = 0;\n+\tvsi->tx_busy = 0;\n+\tvsi->tx_linearize = 0;\n+\tvsi->rx_buf_failed = 0;\n+\tvsi->rx_page_failed = 0;\n+\n+\trcu_read_lock();\n+\n+\t/* update Tx rings counters */\n+\tice_for_each_txq(vsi, i) {\n+\t\tring = READ_ONCE(vsi->tx_rings[i]);\n+\t\tice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);\n+\t\tvsi_stats->tx_packets += pkts;\n+\t\tvsi_stats->tx_bytes += bytes;\n+\t\tvsi->tx_restart += ring->tx_stats.restart_q;\n+\t\tvsi->tx_busy += ring->tx_stats.tx_busy;\n+\t\tvsi->tx_linearize += ring->tx_stats.tx_linearize;\n+\t}\n+\n+\t/* update Rx rings counters */\n+\tice_for_each_rxq(vsi, i) {\n+\t\tring = READ_ONCE(vsi->rx_rings[i]);\n+\t\tice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);\n+\t\tvsi_stats->rx_packets += pkts;\n+\t\tvsi_stats->rx_bytes += bytes;\n+\t\tvsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;\n+\t\tvsi->rx_page_failed += ring->rx_stats.alloc_page_failed;\n+\t}\n+\n+\trcu_read_unlock();\n+}\n+\n+/**\n+ * ice_update_vsi_stats - Update VSI stats counters\n+ * @vsi: the VSI to be updated\n+ */\n+static void ice_update_vsi_stats(struct ice_vsi *vsi)\n+{\n+\tstruct rtnl_link_stats64 *cur_ns = &vsi->net_stats;\n+\tstruct ice_eth_stats *cur_es = &vsi->eth_stats;\n+\tstruct ice_pf *pf = vsi->back;\n+\n+\tif (test_bit(__ICE_DOWN, vsi->state) ||\n+\t test_bit(__ICE_CFG_BUSY, pf->state))\n+\t\treturn;\n+\n+\t/* get stats as recorded by Tx/Rx rings */\n+\tice_update_vsi_ring_stats(vsi);\n+\n+\t/* get VSI stats as recorded by the hardware */\n+\tice_update_eth_stats(vsi);\n+\n+\tcur_ns->tx_errors = cur_es->tx_errors;\n+\tcur_ns->rx_dropped = cur_es->rx_discards;\n+\tcur_ns->tx_dropped = cur_es->tx_discards;\n+\tcur_ns->multicast = cur_es->rx_multicast;\n+\n+\t/* update some more netdev stats if this is main VSI */\n+\tif (vsi->type == ICE_VSI_PF) {\n+\t\tcur_ns->rx_crc_errors = pf->stats.crc_errors;\n+\t\tcur_ns->rx_errors = pf->stats.crc_errors +\n+\t\t\t\t pf->stats.illegal_bytes;\n+\t\tcur_ns->rx_length_errors = pf->stats.rx_len_errors;\n+\t}\n+}\n+\n+/**\n+ * ice_update_pf_stats - Update PF port stats counters\n+ * @pf: PF whose stats needs to be updated\n+ */\n+static void ice_update_pf_stats(struct ice_pf *pf)\n+{\n+\tstruct ice_hw_port_stats *prev_ps, *cur_ps;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu8 pf_id;\n+\n+\tprev_ps = &pf->stats_prev;\n+\tcur_ps = &pf->stats;\n+\tpf_id = hw->pf_id;\n+\n+\tice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,\n+\t\t\t &cur_ps->eth.rx_bytes);\n+\n+\tice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,\n+\t\t\t &cur_ps->eth.rx_unicast);\n+\n+\tice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,\n+\t\t\t &cur_ps->eth.rx_multicast);\n+\n+\tice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,\n+\t\t\t &cur_ps->eth.rx_broadcast);\n+\n+\tice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,\n+\t\t\t 
&cur_ps->eth.tx_bytes);\n+\n+\tice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,\n+\t\t\t &cur_ps->eth.tx_unicast);\n+\n+\tice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,\n+\t\t\t &cur_ps->eth.tx_multicast);\n+\n+\tice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,\n+\t\t\t &cur_ps->eth.tx_broadcast);\n+\n+\tice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->tx_dropped_link_down,\n+\t\t\t &cur_ps->tx_dropped_link_down);\n+\n+\tice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->rx_size_64,\n+\t\t\t &cur_ps->rx_size_64);\n+\n+\tice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->rx_size_127,\n+\t\t\t &cur_ps->rx_size_127);\n+\n+\tice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->rx_size_255,\n+\t\t\t &cur_ps->rx_size_255);\n+\n+\tice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->rx_size_511,\n+\t\t\t &cur_ps->rx_size_511);\n+\n+\tice_stat_update40(hw, GLPRT_PRC1023H(pf_id),\n+\t\t\t GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);\n+\n+\tice_stat_update40(hw, GLPRT_PRC1522H(pf_id),\n+\t\t\t GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);\n+\n+\tice_stat_update40(hw, GLPRT_PRC9522H(pf_id),\n+\t\t\t GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_size_big, &cur_ps->rx_size_big);\n+\n+\tice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->tx_size_64,\n+\t\t\t &cur_ps->tx_size_64);\n+\n+\tice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->tx_size_127,\n+\t\t\t &cur_ps->tx_size_127);\n+\n+\tice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->tx_size_255,\n+\t\t\t &cur_ps->tx_size_255);\n+\n+\tice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),\n+\t\t\t pf->stat_prev_loaded, &prev_ps->tx_size_511,\n+\t\t\t &cur_ps->tx_size_511);\n+\n+\tice_stat_update40(hw, GLPRT_PTC1023H(pf_id),\n+\t\t\t GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);\n+\n+\tice_stat_update40(hw, GLPRT_PTC1522H(pf_id),\n+\t\t\t GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);\n+\n+\tice_stat_update40(hw, GLPRT_PTC9522H(pf_id),\n+\t\t\t GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->tx_size_big, &cur_ps->tx_size_big);\n+\n+\tice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);\n+\n+\tice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);\n+\n+\tice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);\n+\n+\tice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);\n+\n+\tice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->crc_errors, 
&cur_ps->crc_errors);\n+\n+\tice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);\n+\n+\tice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->mac_local_faults,\n+\t\t\t &cur_ps->mac_local_faults);\n+\n+\tice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->mac_remote_faults,\n+\t\t\t &cur_ps->mac_remote_faults);\n+\n+\tice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);\n+\n+\tice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_undersize, &cur_ps->rx_undersize);\n+\n+\tice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_fragments, &cur_ps->rx_fragments);\n+\n+\tice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_oversize, &cur_ps->rx_oversize);\n+\n+\tice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,\n+\t\t\t &prev_ps->rx_jabber, &cur_ps->rx_jabber);\n+\n+\tpf->stat_prev_loaded = true;\n+}\n+\n+/**\n+ * ice_get_stats64 - get statistics for network device structure\n+ * @netdev: network interface device structure\n+ * @stats: main device statistics structure\n+ */\n+static\n+void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct rtnl_link_stats64 *vsi_stats;\n+\tstruct ice_vsi *vsi = np->vsi;\n+\n+\tvsi_stats = &vsi->net_stats;\n+\n+\tif (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)\n+\t\treturn;\n+\t/* netdev packet/byte stats come from ring counter. These are obtained\n+\t * by summing up ring counters (done by ice_update_vsi_ring_stats).\n+\t */\n+\tice_update_vsi_ring_stats(vsi);\n+\tstats->tx_packets = vsi_stats->tx_packets;\n+\tstats->tx_bytes = vsi_stats->tx_bytes;\n+\tstats->rx_packets = vsi_stats->rx_packets;\n+\tstats->rx_bytes = vsi_stats->rx_bytes;\n+\n+\t/* The rest of the stats can be read from the hardware but instead we\n+\t * just return values that the watchdog task has already obtained from\n+\t * the hardware.\n+\t */\n+\tstats->multicast = vsi_stats->multicast;\n+\tstats->tx_errors = vsi_stats->tx_errors;\n+\tstats->tx_dropped = vsi_stats->tx_dropped;\n+\tstats->rx_errors = vsi_stats->rx_errors;\n+\tstats->rx_dropped = vsi_stats->rx_dropped;\n+\tstats->rx_crc_errors = vsi_stats->rx_crc_errors;\n+\tstats->rx_length_errors = vsi_stats->rx_length_errors;\n+}\n+\n /**\n * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI\n * @vsi: VSI having NAPI disabled\n@@ -3492,7 +3954,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)\n * ice_down - Shutdown the connection\n * @vsi: The VSI being stopped\n */\n-static int ice_down(struct ice_vsi *vsi)\n+int ice_down(struct ice_vsi *vsi)\n {\n \tint i, err;\n \n@@ -3892,6 +4354,7 @@ static const struct net_device_ops ice_netdev_ops = {\n \t.ndo_open = ice_open,\n \t.ndo_stop = ice_stop,\n \t.ndo_start_xmit = ice_start_xmit,\n+\t.ndo_get_stats64 = ice_get_stats64,\n \t.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,\n \t.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,\n \t.ndo_set_features = ice_set_features,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 2f1ad8cb2456..a576d173b07d 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -34,6 +34,7 @@\n #define ICE_DBG_RES\t\tBIT_ULL(17)\n 
#define ICE_DBG_AQ_MSG\t\tBIT_ULL(24)\n #define ICE_DBG_AQ_CMD\t\tBIT_ULL(27)\n+#define ICE_DBG_USER\t\tBIT_ULL(31)\n \n enum ice_aq_res_ids {\n \tICE_NVM_RES_ID = 1,\n@@ -56,6 +57,13 @@ enum ice_fc_mode {\n \tICE_FC_DFLT\n };\n \n+enum ice_set_fc_aq_failures {\n+\tICE_SET_FC_AQ_FAIL_NONE = 0,\n+\tICE_SET_FC_AQ_FAIL_GET,\n+\tICE_SET_FC_AQ_FAIL_SET,\n+\tICE_SET_FC_AQ_FAIL_UPDATE\n+};\n+\n /* Various MAC types */\n enum ice_mac_type {\n \tICE_MAC_UNKNOWN = 0,\n@@ -315,10 +323,72 @@ struct ice_hw {\n \n };\n \n+/* Statistics collected by each port, VSI, VEB, and S-channel */\n+struct ice_eth_stats {\n+\tu64 rx_bytes;\t\t\t/* gorc */\n+\tu64 rx_unicast;\t\t\t/* uprc */\n+\tu64 rx_multicast;\t\t/* mprc */\n+\tu64 rx_broadcast;\t\t/* bprc */\n+\tu64 rx_discards;\t\t/* rdpc */\n+\tu64 rx_unknown_protocol;\t/* rupp */\n+\tu64 tx_bytes;\t\t\t/* gotc */\n+\tu64 tx_unicast;\t\t\t/* uptc */\n+\tu64 tx_multicast;\t\t/* mptc */\n+\tu64 tx_broadcast;\t\t/* bptc */\n+\tu64 tx_discards;\t\t/* tdpc */\n+\tu64 tx_errors;\t\t\t/* tepc */\n+};\n+\n+/* Statistics collected by the MAC */\n+struct ice_hw_port_stats {\n+\t/* eth stats collected by the port */\n+\tstruct ice_eth_stats eth;\n+\t/* additional port specific stats */\n+\tu64 tx_dropped_link_down;\t/* tdold */\n+\tu64 crc_errors;\t\t\t/* crcerrs */\n+\tu64 illegal_bytes;\t\t/* illerrc */\n+\tu64 error_bytes;\t\t/* errbc */\n+\tu64 mac_local_faults;\t\t/* mlfc */\n+\tu64 mac_remote_faults;\t\t/* mrfc */\n+\tu64 rx_len_errors;\t\t/* rlec */\n+\tu64 link_xon_rx;\t\t/* lxonrxc */\n+\tu64 link_xoff_rx;\t\t/* lxoffrxc */\n+\tu64 link_xon_tx;\t\t/* lxontxc */\n+\tu64 link_xoff_tx;\t\t/* lxofftxc */\n+\tu64 rx_size_64;\t\t\t/* prc64 */\n+\tu64 rx_size_127;\t\t/* prc127 */\n+\tu64 rx_size_255;\t\t/* prc255 */\n+\tu64 rx_size_511;\t\t/* prc511 */\n+\tu64 rx_size_1023;\t\t/* prc1023 */\n+\tu64 rx_size_1522;\t\t/* prc1522 */\n+\tu64 rx_size_big;\t\t/* prc9522 */\n+\tu64 rx_undersize;\t\t/* ruc */\n+\tu64 rx_fragments;\t\t/* rfc */\n+\tu64 rx_oversize;\t\t/* roc */\n+\tu64 rx_jabber;\t\t\t/* rjc */\n+\tu64 tx_size_64;\t\t\t/* ptc64 */\n+\tu64 tx_size_127;\t\t/* ptc127 */\n+\tu64 tx_size_255;\t\t/* ptc255 */\n+\tu64 tx_size_511;\t\t/* ptc511 */\n+\tu64 tx_size_1023;\t\t/* ptc1023 */\n+\tu64 tx_size_1522;\t\t/* ptc1522 */\n+\tu64 tx_size_big;\t\t/* ptc9522 */\n+};\n+\n /* Checksum and Shadow RAM pointers */\n #define ICE_SR_NVM_DEV_STARTER_VER\t0x18\n #define ICE_SR_NVM_EETRACK_LO\t\t0x2D\n #define ICE_SR_NVM_EETRACK_HI\t\t0x2E\n+#define ICE_NVM_VER_LO_SHIFT\t\t0\n+#define ICE_NVM_VER_LO_MASK\t\t(0xff << ICE_NVM_VER_LO_SHIFT)\n+#define ICE_NVM_VER_HI_SHIFT\t\t12\n+#define ICE_NVM_VER_HI_MASK\t\t(0xf << ICE_NVM_VER_HI_SHIFT)\n+#define ICE_OEM_VER_PATCH_SHIFT\t\t0\n+#define ICE_OEM_VER_PATCH_MASK\t\t(0xff << ICE_OEM_VER_PATCH_SHIFT)\n+#define ICE_OEM_VER_BUILD_SHIFT\t\t8\n+#define ICE_OEM_VER_BUILD_MASK\t\t(0xffff << ICE_OEM_VER_BUILD_SHIFT)\n+#define ICE_OEM_VER_SHIFT\t\t24\n+#define ICE_OEM_VER_MASK\t\t(0xff << ICE_OEM_VER_SHIFT)\n #define ICE_SR_SECTOR_SIZE_IN_WORDS\t0x800\n #define ICE_SR_WORDS_IN_1KB\t\t512\n \n", "prefixes": [ "v3", "12/15" ] }
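Note on the counter arithmetic carried by this patch: ice_stat_update40() and ice_stat_update32() treat the first hardware reading as a zero offset (port and VSI counters are not cleared at PFR) and compensate for roll-over at the counter width before reporting a delta. The following is a minimal, self-contained C sketch of the 40-bit variant of that logic, assuming a simulated register read; fake_rd32() and the sample values in main() are stand-ins added here for illustration and are not part of the patch itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the MMIO register read (rd32() in the patch); it returns the
 * low or high half of a simulated 64-bit counter so the sketch is
 * self-contained and runnable.
 */
static uint32_t fake_rd32(uint64_t raw, bool high)
{
	return high ? (uint32_t)(raw >> 32) : (uint32_t)raw;
}

/* Mirrors the offset-and-roll-over handling of ice_stat_update40(): the
 * first reading becomes the zero offset, and a reading smaller than the
 * stored offset is treated as a roll-over past 2^40.
 */
static void stat_update40(uint64_t raw, bool prev_loaded,
			  uint64_t *prev_stat, uint64_t *cur_stat)
{
	uint64_t new_data;

	new_data = fake_rd32(raw, false);
	new_data |= ((uint64_t)(fake_rd32(raw, true) & 0xFFFF)) << 32;

	if (!prev_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* counter wrapped around the 40-bit boundary */
		*cur_stat = (new_data + (1ULL << 40)) - *prev_stat;
	*cur_stat &= 0xFFFFFFFFFFULL;
}

int main(void)
{
	uint64_t prev = 0, cur = 0;

	/* first read: the raw value becomes the offset, so cur reports 0 */
	stat_update40(0x9F00000000ULL, false, &prev, &cur);
	printf("after first read: %llu\n", (unsigned long long)cur);

	/* counter rolled over past 2^40 back to 0x10: delta stays correct */
	stat_update40(0x10ULL, true, &prev, &cur);
	printf("after roll-over:  %llu\n", (unsigned long long)cur);

	return 0;
}

The same pattern is applied with a 32-bit wrap constant in ice_stat_update32(); only the width of the roll-over correction and the absence of the high-register read differ.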