get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full update; all writable fields should be supplied).
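
A minimal sketch of issuing the GET request shown below from Python with the requests library; the API serves JSON by default, so ?format=api is only needed for the browsable HTML view. Printing the name and state fields is purely illustrative.

    import requests

    # Retrieve the patch detail resource shown below.
    url = "http://patchwork.ozlabs.org/api/patches/888276/"
    resp = requests.get(url)
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])    # "[v4,14/15] ice: Support link events, reset and rebuild"
    print(patch["state"])   # "accepted"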

GET /api/patches/888276/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 888276,
    "url": "http://patchwork.ozlabs.org/api/patches/888276/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180320145819.19133-15-anirudh.venkataramanan@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180320145819.19133-15-anirudh.venkataramanan@intel.com>",
    "list_archive_url": null,
    "date": "2018-03-20T14:58:18",
    "name": "[v4,14/15] ice: Support link events, reset and rebuild",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "57ba87897079683357d21092a58c90945b77ec4e",
    "submitter": {
        "id": 73601,
        "url": "http://patchwork.ozlabs.org/api/people/73601/?format=api",
        "name": "Anirudh Venkataramanan",
        "email": "anirudh.venkataramanan@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180320145819.19133-15-anirudh.venkataramanan@intel.com/mbox/",
    "series": [
        {
            "id": 34850,
            "url": "http://patchwork.ozlabs.org/api/series/34850/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=34850",
            "date": "2018-03-20T14:58:19",
            "name": "Add ice driver",
            "version": 4,
            "mbox": "http://patchwork.ozlabs.org/series/34850/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/888276/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/888276/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=none (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 405GKT2t9Rz9s0r\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 21 Mar 2018 01:58:53 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id BED2289881;\n\tTue, 20 Mar 2018 14:58:51 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 3yxd8LvPg9PB; Tue, 20 Mar 2018 14:58:44 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 254D189892;\n\tTue, 20 Mar 2018 14:58:42 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id C32A01C0D70\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 20 Mar 2018 14:58:40 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id BE395879B9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 20 Mar 2018 14:58:40 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id Tuvm1fIp5Hpp for <intel-wired-lan@lists.osuosl.org>;\n\tTue, 20 Mar 2018 14:58:29 +0000 (UTC)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 502BF879A9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 20 Mar 2018 14:58:23 +0000 (UTC)",
            "from fmsmga006.fm.intel.com ([10.253.24.20])\n\tby fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t20 Mar 2018 07:58:22 -0700",
            "from shasta.jf.intel.com ([10.166.241.32])\n\tby fmsmga006.fm.intel.com with ESMTP; 20 Mar 2018 07:58:22 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.48,335,1517904000\"; d=\"scan'208\";a=\"213001025\"",
        "From": "Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Tue, 20 Mar 2018 07:58:18 -0700",
        "Message-Id": "<20180320145819.19133-15-anirudh.venkataramanan@intel.com>",
        "X-Mailer": "git-send-email 2.14.3",
        "In-Reply-To": "<20180320145819.19133-1-anirudh.venkataramanan@intel.com>",
        "References": "<20180320145819.19133-1-anirudh.venkataramanan@intel.com>",
        "Subject": "[Intel-wired-lan] [PATCH v4 14/15] ice: Support link events,\n\treset and rebuild",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "Link events are posted to a PF's admin receive queue (ARQ). This patch\nadds the ability to detect and process link events.\n\nThis patch also adds the ability to process resets.\n\nThe driver can process the following resets:\n    1) EMP Reset (EMPR)\n    2) Global Reset (GLOBR)\n    3) Core Reset (CORER)\n    4) Physical Function Reset (PFR)\n\nEMPR is the largest level of reset that the driver can handle. An EMPR\nresets the manageability block and also the data path, including PHY and\nlink for all the PFs. The affected PFs are notified of this event through\na miscellaneous interrupt.\n\nGLOBR is a subset of EMPR. It does everything EMPR does except that it\ndoesn't reset the manageability block.\n\nCORER is a subset of GLOBR. It does everything GLOBR does but doesn't\nreset PHY and link.\n\nPFR is a subset of CORER and affects only the given physical function.\nIn other words, PFR can be thought of as a CORER for a single PF. Since\nonly the issuing PF is affected, a PFR doesn't result in the miscellaneousi\ninterrupt being triggered.\n\nAll the resets have the following in common:\n1) Tx/Rx is halted and all queues are stopped.\n2) All the VSIs and filters programmed for the PF are lost and have to be\n   reprogrammed.\n3) Control queue interfaces are reset and have to be reprogrammed.\n\nIn the rebuild flow, control queues are reinitialized, VSIs are reallocated\nand filters are restored.\n\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice.h            |  19 +\n drivers/net/ethernet/intel/ice/ice_adminq_cmd.h |  19 +\n drivers/net/ethernet/intel/ice/ice_common.c     |  60 +++\n drivers/net/ethernet/intel/ice/ice_common.h     |   5 +\n drivers/net/ethernet/intel/ice/ice_hw_autogen.h |   2 +\n drivers/net/ethernet/intel/ice/ice_main.c       | 581 +++++++++++++++++++++++-\n drivers/net/ethernet/intel/ice/ice_type.h       |   1 +\n 7 files changed, 681 insertions(+), 6 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex cb1e8a127af1..6d7d03b80dbf 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -92,6 +92,11 @@ extern const char ice_drv_ver[];\n #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))\n #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))\n \n+/* Macro for each VSI in a PF */\n+#define ice_for_each_vsi(pf, i) \\\n+\tfor ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)\n+\n+/* Macros for each tx/rx ring in a VSI */\n #define ice_for_each_txq(vsi, i) \\\n \tfor ((i) = 0; (i) < (vsi)->num_txq; (i)++)\n \n@@ -123,7 +128,16 @@ struct ice_sw {\n \n enum ice_state {\n \t__ICE_DOWN,\n+\t__ICE_NEEDS_RESTART,\n+\t__ICE_RESET_RECOVERY_PENDING,\t/* set by driver when reset starts */\n \t__ICE_PFR_REQ,\t\t\t/* set by driver and peers */\n+\t__ICE_CORER_REQ,\t\t/* set by driver and peers */\n+\t__ICE_GLOBR_REQ,\t\t/* set by driver and peers */\n+\t__ICE_CORER_RECV,\t\t/* set by OICR handler */\n+\t__ICE_GLOBR_RECV,\t\t/* set by OICR handler */\n+\t__ICE_EMPR_RECV,\t\t/* set by OICR handler */\n+\t__ICE_SUSPENDED,\t\t/* set on module remove path */\n+\t__ICE_RESET_FAILED,\t\t/* set by reset/rebuild */\n \t__ICE_ADMINQ_EVENT_PENDING,\n \t__ICE_CFG_BUSY,\n \t__ICE_SERVICE_SCHED,\n@@ -240,6 +254,11 @@ struct ice_pf {\n \tu16 q_left_rx;\t\t/* remaining num rx queues left unclaimed */\n \tu16 next_vsi;\t\t/* Next free slot in pf->vsi[] - 0-based! */\n \tu16 num_alloc_vsi;\n+\tu16 corer_count;\t/* Core reset count */\n+\tu16 globr_count;\t/* Global reset count */\n+\tu16 empr_count;\t\t/* EMP reset count */\n+\tu16 pfr_count;\t\t/* PF reset count */\n+\n \tstruct ice_hw_port_stats stats;\n \tstruct ice_hw_port_stats stats_prev;\n \tstruct ice_hw hw;\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex 62509635fc5e..8cade22c1cf6 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -1023,6 +1023,23 @@ struct ice_aqc_get_link_status_data {\n \t__le64 reserved4;\n };\n \n+/* Set event mask command (direct 0x0613) */\n+struct ice_aqc_set_event_mask {\n+\tu8\tlport_num;\n+\tu8\treserved[7];\n+\t__le16\tevent_mask;\n+#define ICE_AQ_LINK_EVENT_UPDOWN\t\tBIT(1)\n+#define ICE_AQ_LINK_EVENT_MEDIA_NA\t\tBIT(2)\n+#define ICE_AQ_LINK_EVENT_LINK_FAULT\t\tBIT(3)\n+#define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM\tBIT(4)\n+#define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS\tBIT(5)\n+#define ICE_AQ_LINK_EVENT_SIGNAL_DETECT\t\tBIT(6)\n+#define ICE_AQ_LINK_EVENT_AN_COMPLETED\t\tBIT(7)\n+#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL\tBIT(8)\n+#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED\tBIT(9)\n+\tu8\treserved1[6];\n+};\n+\n /* NVM Read command (indirect 0x0701)\n  * NVM Erase commands (direct 0x0702)\n  * NVM Update commands (indirect 0x0703)\n@@ -1229,6 +1246,7 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_dis_txqs dis_txqs;\n \t\tstruct ice_aqc_add_get_update_free_vsi vsi_cmd;\n \t\tstruct ice_aqc_alloc_free_res_cmd sw_res_ctrl;\n+\t\tstruct ice_aqc_set_event_mask set_event_mask;\n \t\tstruct ice_aqc_get_link_status get_link_status;\n \t} params;\n };\n@@ -1308,6 +1326,7 @@ enum ice_adminq_opc {\n \tice_aqc_opc_set_phy_cfg\t\t\t\t= 0x0601,\n \tice_aqc_opc_restart_an\t\t\t\t= 0x0605,\n \tice_aqc_opc_get_link_status\t\t\t= 0x0607,\n+\tice_aqc_opc_set_event_mask\t\t\t= 0x0613,\n \n \t/* NVM commands */\n 
\tice_aqc_opc_nvm_read\t\t\t\t= 0x0701,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex 958161a21115..316262b3c7a0 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -1441,6 +1441,39 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)\n \treturn status;\n }\n \n+/**\n+ * ice_get_link_status - get status of the HW network link\n+ * @pi: port information structure\n+ * @link_up: pointer to bool (true/false = linkup/linkdown)\n+ *\n+ * Variable link_up is true if link is up, false if link is down.\n+ * The variable link_up is invalid if status is non zero. As a\n+ * result of this call, link status reporting becomes enabled\n+ */\n+enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)\n+{\n+\tstruct ice_phy_info *phy_info;\n+\tenum ice_status status = 0;\n+\n+\tif (!pi)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tphy_info = &pi->phy;\n+\n+\tif (phy_info->get_link_info) {\n+\t\tstatus = ice_update_link_info(pi);\n+\n+\t\tif (status)\n+\t\t\tice_debug(pi->hw, ICE_DBG_LINK,\n+\t\t\t\t  \"get link status error, status = %d\\n\",\n+\t\t\t\t  status);\n+\t}\n+\n+\t*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;\n+\n+\treturn status;\n+}\n+\n /**\n  * ice_aq_set_link_restart_an\n  * @pi: pointer to the port information structure\n@@ -1470,6 +1503,33 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,\n \treturn ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);\n }\n \n+/**\n+ * ice_aq_set_event_mask\n+ * @hw: pointer to the hw struct\n+ * @port_num: port number of the physical function\n+ * @mask: event mask to be set\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Set event mask (0x0613)\n+ */\n+enum ice_status\n+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,\n+\t\t      struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_set_event_mask *cmd;\n+\tstruct ice_aq_desc desc;\n+\n+\tcmd = &desc.params.set_event_mask;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);\n+\n+\tcmd->lport_num = port_num;\n+\n+\tcmd->event_mask = cpu_to_le16(mask);\n+\n+\treturn ice_aq_send_cmd(hw, &desc, NULL, 0, cd);\n+}\n+\n /**\n  * __ice_aq_get_set_rss_lut\n  * @hw: pointer to the hardware structure\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex 3e33a47cb61a..2921f3c6ce4b 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -34,6 +34,8 @@ enum ice_status\n ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,\n \t\t  struct ice_rq_event_info *e, u16 *pending);\n enum ice_status\n+ice_get_link_status(struct ice_port_info *pi, bool *link_up);\n+enum ice_status\n ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,\n \t\tenum ice_aq_res_access_type access);\n void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);\n@@ -80,6 +82,9 @@ enum ice_status\n ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,\n \t\t     struct ice_link_status *link, struct ice_sq_cd *cd);\n enum ice_status\n+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,\n+\t\t      struct ice_sq_cd *cd);\n+enum ice_status\n ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,\n \t\tu32 *q_teids, struct ice_sq_cd *cmd_details);\n enum ice_status\ndiff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h 
b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\nindex 0d24ec3ca975..c371043c8946 100644\n--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n@@ -99,6 +99,8 @@\n #define GLGEN_RSTCTL\t\t\t0x000B8180\n #define GLGEN_RSTCTL_GRSTDEL_S\t\t0\n #define GLGEN_RSTCTL_GRSTDEL_M\t\tICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)\n+#define GLGEN_RSTAT_RESET_TYPE_S\t2\n+#define GLGEN_RSTAT_RESET_TYPE_M\tICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S)\n #define GLGEN_RTRIG\t\t\t0x000B8190\n #define GLGEN_RTRIG_CORER_S\t\t0\n #define GLGEN_RTRIG_CORER_M\t\tBIT(GLGEN_RTRIG_CORER_S)\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex 8eef9a4c1d13..90f44be5f858 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -43,6 +43,8 @@ MODULE_PARM_DESC(debug, \"netif level (0=none,...,16=all)\");\n static struct workqueue_struct *ice_wq;\n static const struct net_device_ops ice_netdev_ops;\n \n+static void ice_pf_dis_all_vsi(struct ice_pf *pf);\n+static void ice_rebuild(struct ice_pf *pf);\n static int ice_vsi_release(struct ice_vsi *vsi);\n static void ice_update_vsi_stats(struct ice_vsi *vsi);\n static void ice_update_pf_stats(struct ice_pf *pf);\n@@ -230,6 +232,132 @@ static void ice_free_fltr_list(struct device *dev, struct list_head *h)\n \t}\n }\n \n+/**\n+ * ice_is_reset_recovery_pending - schedule a reset\n+ * @state: pf state field\n+ */\n+static bool ice_is_reset_recovery_pending(unsigned long int *state)\n+{\n+\treturn test_bit(__ICE_RESET_RECOVERY_PENDING, state);\n+}\n+\n+/**\n+ * ice_prepare_for_reset - prep for the core to reset\n+ * @pf: board private structure\n+ *\n+ * Inform or close all dependent features in prep for reset.\n+ */\n+static void\n+ice_prepare_for_reset(struct ice_pf *pf)\n+{\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 v;\n+\n+\tice_for_each_vsi(pf, v)\n+\t\tif (pf->vsi[v])\n+\t\t\tice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);\n+\n+\tdev_dbg(&pf->pdev->dev, \"Tearing down internal switch for reset\\n\");\n+\n+\t/* disable the VSIs and their queues that are not already DOWN */\n+\t/* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */\n+\tice_pf_dis_all_vsi(pf);\n+\n+\tice_for_each_vsi(pf, v)\n+\t\tif (pf->vsi[v])\n+\t\t\tpf->vsi[v]->vsi_num = 0;\n+\n+\tice_shutdown_all_ctrlq(hw);\n+}\n+\n+/**\n+ * ice_do_reset - Initiate one of many types of resets\n+ * @pf: board private structure\n+ * @reset_type: reset type requested\n+ * before this function was called.\n+ */\n+static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)\n+{\n+\tstruct device *dev = &pf->pdev->dev;\n+\tstruct ice_hw *hw = &pf->hw;\n+\n+\tdev_dbg(dev, \"reset_type 0x%x requested\\n\", reset_type);\n+\tWARN_ON(in_interrupt());\n+\n+\t/* PFR is a bit of a special case because it doesn't result in an OICR\n+\t * interrupt. 
So for PFR, we prepare for reset, issue the reset and\n+\t * rebuild sequentially.\n+\t */\n+\tif (reset_type == ICE_RESET_PFR) {\n+\t\tset_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);\n+\t\tice_prepare_for_reset(pf);\n+\t}\n+\n+\t/* trigger the reset */\n+\tif (ice_reset(hw, reset_type)) {\n+\t\tdev_err(dev, \"reset %d failed\\n\", reset_type);\n+\t\tset_bit(__ICE_RESET_FAILED, pf->state);\n+\t\tclear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);\n+\t\treturn;\n+\t}\n+\n+\tif (reset_type == ICE_RESET_PFR) {\n+\t\tpf->pfr_count++;\n+\t\tice_rebuild(pf);\n+\t\tclear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);\n+\t}\n+}\n+\n+/**\n+ * ice_reset_subtask - Set up for resetting the device and driver\n+ * @pf: board private structure\n+ */\n+static void ice_reset_subtask(struct ice_pf *pf)\n+{\n+\tenum ice_reset_req reset_type;\n+\n+\trtnl_lock();\n+\n+\t/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an\n+\t * OICR interrupt. The OICR handler (ice_misc_intr) determines what\n+\t * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in\n+\t * pf->state. So if reset/recovery is pending (as indicated by this bit)\n+\t * we do a rebuild and return.\n+\t */\n+\tif (ice_is_reset_recovery_pending(pf->state)) {\n+\t\tclear_bit(__ICE_GLOBR_RECV, pf->state);\n+\t\tclear_bit(__ICE_CORER_RECV, pf->state);\n+\t\tice_prepare_for_reset(pf);\n+\n+\t\t/* make sure we are ready to rebuild */\n+\t\tif (ice_check_reset(&pf->hw))\n+\t\t\tset_bit(__ICE_RESET_FAILED, pf->state);\n+\t\telse\n+\t\t\tice_rebuild(pf);\n+\t\tclear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);\n+\t\tgoto unlock;\n+\t}\n+\n+\t/* No pending resets to finish processing. Check for new resets */\n+\tif (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))\n+\t\treset_type = ICE_RESET_GLOBR;\n+\telse if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))\n+\t\treset_type = ICE_RESET_CORER;\n+\telse if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))\n+\t\treset_type = ICE_RESET_PFR;\n+\telse\n+\t\tgoto unlock;\n+\n+\t/* reset if not already down or resetting */\n+\tif (!test_bit(__ICE_DOWN, pf->state) &&\n+\t    !test_bit(__ICE_CFG_BUSY, pf->state)) {\n+\t\tice_do_reset(pf, reset_type);\n+\t}\n+\n+unlock:\n+\trtnl_unlock();\n+}\n+\n /**\n  * ice_watchdog_subtask - periodic tasks not using event driven scheduling\n  * @pf: board private structure\n@@ -328,6 +456,144 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)\n \t\t    speed, fc);\n }\n \n+/**\n+ * ice_init_link_events - enable/initialize link events\n+ * @pi: pointer to the port_info instance\n+ *\n+ * Returns -EIO on failure, 0 on success\n+ */\n+static int ice_init_link_events(struct ice_port_info *pi)\n+{\n+\tu16 mask;\n+\n+\tmask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |\n+\t\t       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));\n+\n+\tif (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {\n+\t\tdev_dbg(ice_hw_to_dev(pi->hw),\n+\t\t\t\"Failed to set link event mask for port %d\\n\",\n+\t\t\tpi->lport);\n+\t\treturn -EIO;\n+\t}\n+\n+\tif (ice_aq_get_link_info(pi, true, NULL, NULL)) {\n+\t\tdev_dbg(ice_hw_to_dev(pi->hw),\n+\t\t\t\"Failed to enable link events for port %d\\n\",\n+\t\t\tpi->lport);\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_link_event - update the vsi's netdev\n+ * @vsi: the vsi on which the link event occurred\n+ * @link_up: whether or not the vsi needs to be set up or down\n+ */\n+static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)\n+{\n+\tif (!vsi || test_bit(__ICE_DOWN, 
vsi->state))\n+\t\treturn;\n+\n+\tif (vsi->type == ICE_VSI_PF) {\n+\t\tif (!vsi->netdev) {\n+\t\t\tdev_dbg(&vsi->back->pdev->dev,\n+\t\t\t\t\"vsi->netdev is not initialized!\\n\");\n+\t\t\treturn;\n+\t\t}\n+\t\tif (link_up) {\n+\t\t\tnetif_carrier_on(vsi->netdev);\n+\t\t\tnetif_tx_wake_all_queues(vsi->netdev);\n+\t\t} else {\n+\t\t\tnetif_carrier_off(vsi->netdev);\n+\t\t\tnetif_tx_stop_all_queues(vsi->netdev);\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * ice_link_event - process the link event\n+ * @pf: pf that the link event is associated with\n+ * @pi: port_info for the port that the link event is associated with\n+ *\n+ * Returns -EIO if ice_get_link_status() fails\n+ * Returns 0 on success\n+ */\n+static int\n+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)\n+{\n+\tu8 new_link_speed, old_link_speed;\n+\tstruct ice_phy_info *phy_info;\n+\tbool new_link_same_as_old;\n+\tbool new_link, old_link;\n+\tu8 lport;\n+\tu16 v;\n+\n+\tphy_info = &pi->phy;\n+\tphy_info->link_info_old = phy_info->link_info;\n+\t/* Force ice_get_link_status() to update link info */\n+\tphy_info->get_link_info = true;\n+\n+\told_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);\n+\told_link_speed = phy_info->link_info_old.link_speed;\n+\n+\tlport = pi->lport;\n+\tif (ice_get_link_status(pi, &new_link)) {\n+\t\tdev_dbg(&pf->pdev->dev,\n+\t\t\t\"Could not get link status for port %d\\n\", lport);\n+\t\treturn -EIO;\n+\t}\n+\n+\tnew_link_speed = phy_info->link_info.link_speed;\n+\n+\tnew_link_same_as_old = (new_link == old_link &&\n+\t\t\t\tnew_link_speed == old_link_speed);\n+\n+\tice_for_each_vsi(pf, v) {\n+\t\tstruct ice_vsi *vsi = pf->vsi[v];\n+\n+\t\tif (!vsi || !vsi->port_info)\n+\t\t\tcontinue;\n+\n+\t\tif (new_link_same_as_old &&\n+\t\t    (test_bit(__ICE_DOWN, vsi->state) ||\n+\t\t    new_link == netif_carrier_ok(vsi->netdev)))\n+\t\t\tcontinue;\n+\n+\t\tif (vsi->port_info->lport == lport) {\n+\t\t\tice_print_link_msg(vsi, new_link);\n+\t\t\tice_vsi_link_event(vsi, new_link);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_handle_link_event - handle link event via ARQ\n+ * @pf: pf that the link event is associated with\n+ *\n+ * Return -EINVAL if port_info is null\n+ * Return status on succes\n+ */\n+static int ice_handle_link_event(struct ice_pf *pf)\n+{\n+\tstruct ice_port_info *port_info;\n+\tint status;\n+\n+\tport_info = pf->hw.port_info;\n+\tif (!port_info)\n+\t\treturn -EINVAL;\n+\n+\tstatus = ice_link_event(pf, port_info);\n+\tif (status)\n+\t\tdev_dbg(&pf->pdev->dev,\n+\t\t\t\"Could not process link event, error %d\\n\", status);\n+\n+\treturn status;\n+}\n+\n /**\n  * __ice_clean_ctrlq - helper function to clean controlq rings\n  * @pf: ptr to struct ice_pf\n@@ -342,6 +608,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)\n \tconst char *qtype;\n \tu32 oldval, val;\n \n+\t/* Do not clean control queue if/when PF reset fails */\n+\tif (test_bit(__ICE_RESET_FAILED, pf->state))\n+\t\treturn 0;\n+\n \tswitch (q_type) {\n \tcase ICE_CTL_Q_ADMIN:\n \t\tcq = &hw->adminq;\n@@ -408,6 +678,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)\n \n \tdo {\n \t\tenum ice_status ret;\n+\t\tu16 opcode;\n \n \t\tret = ice_clean_rq_elem(hw, cq, &event, &pending);\n \t\tif (ret == ICE_ERR_AQ_NO_WORK)\n@@ -418,6 +689,21 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)\n \t\t\t\tret);\n \t\t\tbreak;\n \t\t}\n+\n+\t\topcode = le16_to_cpu(event.desc.opcode);\n+\n+\t\tswitch (opcode) {\n+\t\tcase ice_aqc_opc_get_link_status:\n+\t\t\tif 
(ice_handle_link_event(pf))\n+\t\t\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\t\t\"Could not handle link event\");\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tdev_dbg(&pf->pdev->dev,\n+\t\t\t\t\"%s Receive Queue unknown event 0x%04x ignored\\n\",\n+\t\t\t\tqtype, opcode);\n+\t\t\tbreak;\n+\t\t}\n \t} while (pending && (i++ < ICE_DFLT_IRQ_WORK));\n \n \tdevm_kfree(&pf->pdev->dev, event.msg_buf);\n@@ -497,6 +783,17 @@ static void ice_service_task(struct work_struct *work)\n \tunsigned long start_time = jiffies;\n \n \t/* subtasks */\n+\n+\t/* process reset requests first */\n+\tice_reset_subtask(pf);\n+\n+\t/* bail if a reset/recovery cycle is pending */\n+\tif (ice_is_reset_recovery_pending(pf->state) ||\n+\t    test_bit(__ICE_SUSPENDED, pf->state)) {\n+\t\tice_service_task_complete(pf);\n+\t\treturn;\n+\t}\n+\n \tice_watchdog_subtask(pf);\n \tice_clean_adminq_subtask(pf);\n \n@@ -1222,6 +1519,37 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)\n \tif (!(oicr & PFINT_OICR_INTEVENT_M))\n \t\tgoto ena_intr;\n \n+\tif (oicr & PFINT_OICR_GRST_M) {\n+\t\tu32 reset;\n+\t\t/* we have a reset warning */\n+\t\tena_mask &= ~PFINT_OICR_GRST_M;\n+\t\treset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>\n+\t\t\tGLGEN_RSTAT_RESET_TYPE_S;\n+\n+\t\tif (reset == ICE_RESET_CORER)\n+\t\t\tpf->corer_count++;\n+\t\telse if (reset == ICE_RESET_GLOBR)\n+\t\t\tpf->globr_count++;\n+\t\telse\n+\t\t\tpf->empr_count++;\n+\n+\t\t/* If a reset cycle isn't already in progress, we set a bit in\n+\t\t * pf->state so that the service task can start a reset/rebuild.\n+\t\t * We also make note of which reset happened so that peer\n+\t\t * devices/drivers can be informed.\n+\t\t */\n+\t\tif (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) {\n+\t\t\tif (reset == ICE_RESET_CORER)\n+\t\t\t\tset_bit(__ICE_CORER_RECV, pf->state);\n+\t\t\telse if (reset == ICE_RESET_GLOBR)\n+\t\t\t\tset_bit(__ICE_GLOBR_RECV, pf->state);\n+\t\t\telse\n+\t\t\t\tset_bit(__ICE_EMPR_RECV, pf->state);\n+\n+\t\t\tset_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);\n+\t\t}\n+\t}\n+\n \tif (oicr & PFINT_OICR_HMC_ERR_M) {\n \t\tena_mask &= ~PFINT_OICR_HMC_ERR_M;\n \t\tdev_dbg(&pf->pdev->dev,\n@@ -1240,9 +1568,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)\n \t\t */\n \t\tif (oicr & (PFINT_OICR_PE_CRITERR_M |\n \t\t\t    PFINT_OICR_PCI_EXCEPTION_M |\n-\t\t\t    PFINT_OICR_ECC_ERR_M))\n+\t\t\t    PFINT_OICR_ECC_ERR_M)) {\n \t\t\tset_bit(__ICE_PFR_REQ, pf->state);\n-\n+\t\t\tice_service_task_schedule(pf);\n+\t\t}\n \t\tena_mask &= ~oicr;\n \t}\n \tret = IRQ_HANDLED;\n@@ -1499,6 +1828,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)\n \t\t\t dev_driver_string(&pf->pdev->dev),\n \t\t\t dev_name(&pf->pdev->dev));\n \n+\t/* Do not request IRQ but do enable OICR interrupt since settings are\n+\t * lost during reset. 
Note that this function is called only during\n+\t * rebuild path and not while reset is in progress.\n+\t */\n+\tif (ice_is_reset_recovery_pending(pf->state))\n+\t\tgoto skip_req_irq;\n+\n \t/* reserve one vector in irq_tracker for misc interrupts */\n \toicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);\n \tif (oicr_idx < 0)\n@@ -1517,6 +1853,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)\n \t\treturn err;\n \t}\n \n+skip_req_irq:\n \tice_ena_misc_vector(pf);\n \n \tval = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |\n@@ -2084,6 +2421,100 @@ static int ice_vsi_cfg_rss(struct ice_vsi *vsi)\n \treturn err;\n }\n \n+/**\n+ * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI\n+ * @vsi: pointer to the ice_vsi\n+ *\n+ * This reallocates the VSIs queue resources\n+ *\n+ * Returns 0 on success and negative value on failure\n+ */\n+static int ice_vsi_reinit_setup(struct ice_vsi *vsi)\n+{\n+\tu16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };\n+\tint ret, i;\n+\n+\tif (!vsi)\n+\t\treturn -EINVAL;\n+\n+\tice_vsi_free_q_vectors(vsi);\n+\tice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);\n+\tvsi->base_vector = 0;\n+\tice_vsi_clear_rings(vsi);\n+\tice_vsi_free_arrays(vsi, false);\n+\tice_vsi_set_num_qs(vsi);\n+\n+\t/* Initialize VSI struct elements and create VSI in FW */\n+\tret = ice_vsi_add(vsi);\n+\tif (ret < 0)\n+\t\tgoto err_vsi;\n+\n+\tret = ice_vsi_alloc_arrays(vsi, false);\n+\tif (ret < 0)\n+\t\tgoto err_vsi;\n+\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\tif (!vsi->netdev) {\n+\t\t\tret = ice_cfg_netdev(vsi);\n+\t\t\tif (ret)\n+\t\t\t\tgoto err_rings;\n+\n+\t\t\tret = register_netdev(vsi->netdev);\n+\t\t\tif (ret)\n+\t\t\t\tgoto err_rings;\n+\n+\t\t\tnetif_carrier_off(vsi->netdev);\n+\t\t\tnetif_tx_stop_all_queues(vsi->netdev);\n+\t\t}\n+\n+\t\tret = ice_vsi_alloc_q_vectors(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_rings;\n+\n+\t\tret = ice_vsi_setup_vector_base(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_vectors;\n+\n+\t\tret = ice_vsi_alloc_rings(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_vectors;\n+\n+\t\tice_vsi_map_rings_to_vectors(vsi);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tice_vsi_set_tc_cfg(vsi);\n+\n+\t/* configure VSI nodes based on number of queues and TC's */\n+\tfor (i = 0; i < vsi->tc_cfg.numtc; i++)\n+\t\tmax_txqs[i] = vsi->num_txq;\n+\n+\tret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,\n+\t\t\t      vsi->tc_cfg.ena_tc, max_txqs);\n+\tif (ret) {\n+\t\tdev_info(&vsi->back->pdev->dev,\n+\t\t\t \"Failed VSI lan queue config\\n\");\n+\t\tgoto err_vectors;\n+\t}\n+\treturn 0;\n+\n+err_vectors:\n+\tice_vsi_free_q_vectors(vsi);\n+err_rings:\n+\tif (vsi->netdev) {\n+\t\tunregister_netdev(vsi->netdev);\n+\t\tfree_netdev(vsi->netdev);\n+\t\tvsi->netdev = NULL;\n+\t}\n+err_vsi:\n+\tice_vsi_clear(vsi);\n+\tset_bit(__ICE_RESET_FAILED, vsi->back->state);\n+\treturn ret;\n+}\n+\n /**\n  * ice_vsi_setup - Set up a VSI by a given type\n  * @pf: board private structure\n@@ -2359,10 +2790,17 @@ static int ice_setup_pf_sw(struct ice_pf *pf)\n \tstruct ice_vsi *vsi;\n \tint status = 0;\n \n-\tvsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);\n-\tif (!vsi) {\n-\t\tstatus = -ENOMEM;\n-\t\tgoto error_exit;\n+\tif (!ice_is_reset_recovery_pending(pf->state)) {\n+\t\tvsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);\n+\t\tif (!vsi) {\n+\t\t\tstatus = -ENOMEM;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\t} else {\n+\t\tvsi = pf->vsi[0];\n+\t\tstatus = ice_vsi_reinit_setup(vsi);\n+\t\tif (status < 0)\n+\t\t\treturn -EIO;\n \t}\n \n 
\t/* tmp_add_list contains a list of MAC addresses for which MAC\n@@ -2751,6 +3189,12 @@ static int ice_probe(struct pci_dev *pdev,\n \t/* since everything is good, start the service timer */\n \tmod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));\n \n+\terr = ice_init_link_events(pf->hw.port_info);\n+\tif (err) {\n+\t\tdev_err(&pdev->dev, \"ice_init_link_events failed: %d\\n\", err);\n+\t\tgoto err_alloc_sw_unroll;\n+\t}\n+\n \treturn 0;\n \n err_alloc_sw_unroll:\n@@ -4231,6 +4675,131 @@ static int ice_vsi_release(struct ice_vsi *vsi)\n \treturn 0;\n }\n \n+/**\n+ * ice_dis_vsi - pause a VSI\n+ * @vsi: the VSI being paused\n+ */\n+static void ice_dis_vsi(struct ice_vsi *vsi)\n+{\n+\tif (test_bit(__ICE_DOWN, vsi->state))\n+\t\treturn;\n+\n+\tset_bit(__ICE_NEEDS_RESTART, vsi->state);\n+\n+\tif (vsi->netdev && netif_running(vsi->netdev) &&\n+\t    vsi->type == ICE_VSI_PF)\n+\t\tvsi->netdev->netdev_ops->ndo_stop(vsi->netdev);\n+\n+\tice_vsi_close(vsi);\n+}\n+\n+/**\n+ * ice_ena_vsi - resume a VSI\n+ * @vsi: the VSI being resume\n+ */\n+static void ice_ena_vsi(struct ice_vsi *vsi)\n+{\n+\tif (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))\n+\t\treturn;\n+\n+\tif (vsi->netdev && netif_running(vsi->netdev))\n+\t\tvsi->netdev->netdev_ops->ndo_open(vsi->netdev);\n+\telse if (ice_vsi_open(vsi))\n+\t\t/* this clears the DOWN bit */\n+\t\tdev_dbg(&vsi->back->pdev->dev, \"Failed open VSI 0x%04X on switch 0x%04X\\n\",\n+\t\t\tvsi->vsi_num, vsi->vsw->sw_id);\n+}\n+\n+/**\n+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF\n+ * @pf: the PF\n+ */\n+static void ice_pf_dis_all_vsi(struct ice_pf *pf)\n+{\n+\tint v;\n+\n+\tice_for_each_vsi(pf, v)\n+\t\tif (pf->vsi[v])\n+\t\t\tice_dis_vsi(pf->vsi[v]);\n+}\n+\n+/**\n+ * ice_pf_ena_all_vsi - Resume all VSIs on a PF\n+ * @pf: the PF\n+ */\n+static void ice_pf_ena_all_vsi(struct ice_pf *pf)\n+{\n+\tint v;\n+\n+\tice_for_each_vsi(pf, v)\n+\t\tif (pf->vsi[v])\n+\t\t\tice_ena_vsi(pf->vsi[v]);\n+}\n+\n+/**\n+ * ice_rebuild - rebuild after reset\n+ * @pf: pf to rebuild\n+ */\n+static void ice_rebuild(struct ice_pf *pf)\n+{\n+\tstruct device *dev = &pf->pdev->dev;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tenum ice_status ret;\n+\tint err;\n+\n+\tif (test_bit(__ICE_DOWN, pf->state))\n+\t\tgoto clear_recovery;\n+\n+\tdev_dbg(dev, \"rebuilding pf\\n\");\n+\n+\tret = ice_init_all_ctrlq(hw);\n+\tif (ret) {\n+\t\tdev_err(dev, \"control queues init failed %d\\n\", ret);\n+\t\tgoto fail_reset;\n+\t}\n+\n+\tret = ice_clear_pf_cfg(hw);\n+\tif (ret) {\n+\t\tdev_err(dev, \"clear PF configuration failed %d\\n\", ret);\n+\t\tgoto fail_reset;\n+\t}\n+\n+\tice_clear_pxe_mode(hw);\n+\n+\tret = ice_get_caps(hw);\n+\tif (ret) {\n+\t\tdev_err(dev, \"ice_get_caps failed %d\\n\", ret);\n+\t\tgoto fail_reset;\n+\t}\n+\n+\t/* basic nic switch setup */\n+\terr = ice_setup_pf_sw(pf);\n+\tif (err) {\n+\t\tdev_err(dev, \"ice_setup_pf_sw failed\\n\");\n+\t\tgoto fail_reset;\n+\t}\n+\n+\t/* start misc vector */\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {\n+\t\terr = ice_req_irq_msix_misc(pf);\n+\t\tif (err) {\n+\t\t\tdev_err(dev, \"misc vector setup failed: %d\\n\", err);\n+\t\t\tgoto fail_reset;\n+\t\t}\n+\t}\n+\n+\t/* restart the VSIs that were rebuilt and running before the reset */\n+\tice_pf_ena_all_vsi(pf);\n+\n+\treturn;\n+\n+fail_reset:\n+\tice_shutdown_all_ctrlq(hw);\n+\tset_bit(__ICE_RESET_FAILED, pf->state);\n+clear_recovery:\n+\tset_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);\n+}\n+\n /**\n  * ice_set_rss - Set RSS keys and lut\n  * @vsi: Pointer to VSI 
structure\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex ce091e83b60a..100cb3cf8364 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -31,6 +31,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)\n \n /* debug masks - set these bits in hw->debug_mask to control output */\n #define ICE_DBG_INIT\t\tBIT_ULL(1)\n+#define ICE_DBG_LINK\t\tBIT_ULL(4)\n #define ICE_DBG_QCTX\t\tBIT_ULL(6)\n #define ICE_DBG_NVM\t\tBIT_ULL(7)\n #define ICE_DBG_LAN\t\tBIT_ULL(8)\n",
    "prefixes": [
        "v4",
        "14/15"
    ]
}
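
The patch and put methods above update the writable fields of this resource (for example state, archived, commit_ref and delegate); writes require authenticating as a maintainer of the project. A minimal sketch, assuming a hypothetical API token and that "under-review" is a valid state name on this instance:

    import requests

    url = "http://patchwork.ozlabs.org/api/patches/888276/"
    headers = {"Authorization": "Token 0123456789abcdef"}  # hypothetical token

    # PATCH sends a partial update: only the listed fields are modified.
    resp = requests.patch(url, json={"state": "under-review"}, headers=headers)
    resp.raise_for_status()
    print(resp.json()["state"])

The mbox and comments URLs in the response can be fetched with plain GET requests to retrieve the raw patch email and its follow-up discussion.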