Patch Detail
get:
Show a patch.
patch:
Update a patch (partial update; only the submitted fields change).
put:
Update a patch (full update).
GET /api/patches/1182470/?format=api
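Before the raw response, a minimal sketch of retrieving this record programmatically with Python's `requests` library. The URL and field names come from the response below; requesting `application/json` explicitly is an assumption about the server's content negotiation, since `?format=api` selects the browsable rendering:

```python
import requests

# Fetch the patch record shown below from the Patchwork REST API.
# Asking for JSON explicitly sidesteps the browsable "api" rendering.
url = "http://patchwork.ozlabs.org/api/patches/1182470/"
resp = requests.get(url, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # [v3,2/2] ice: Implement peer communications
print(patch["state"])  # changes-requested
print(patch["mbox"])   # URL of the raw mbox, suitable for "git am"
```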
{ "id": 1182470, "url": "http://patchwork.ozlabs.org/api/patches/1182470/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023115501.41055-2-anthony.l.nguyen@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20191023115501.41055-2-anthony.l.nguyen@intel.com>", "list_archive_url": null, "date": "2019-10-23T11:55:01", "name": "[v3,2/2] ice: Implement peer communications", "commit_ref": null, "pull_url": null, "state": "changes-requested", "archived": false, "hash": "1e963fe0da7ecb94f095a19dd9e6dc87c47bf7dc", "submitter": { "id": 68875, "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api", "name": "Tony Nguyen", "email": "anthony.l.nguyen@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023115501.41055-2-anthony.l.nguyen@intel.com/mbox/", "series": [ { "id": 138181, "url": "http://patchwork.ozlabs.org/api/series/138181/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=138181", "date": "2019-10-23T11:55:00", "name": "[v3,1/2] ice: Initialize and register multi-function device to provide RDMA", "version": 3, "mbox": "http://patchwork.ozlabs.org/series/138181/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1182470/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1182470/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.133;\n\thelo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 46z220626xz9sP6\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 24 Oct 2019 07:25:44 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id E43A48820F;\n\tWed, 23 Oct 2019 20:25:42 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 9PIIe2Ccx6yS; Wed, 23 Oct 2019 20:25:35 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id AC16588231;\n\tWed, 23 Oct 2019 20:25:35 +0000 (UTC)", "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ash.osuosl.org (Postfix) with ESMTP id 3F3191BF5A5\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 20:25:33 +0000 (UTC)", "from localhost (localhost 
[127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 33A87214FD\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 20:25:33 +0000 (UTC)", "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id xR3em5LsYCyn for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 20:25:29 +0000 (UTC)", "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby silver.osuosl.org (Postfix) with ESMTPS id 2E95E1FEAE\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 20:25:29 +0000 (UTC)", "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 Oct 2019 13:25:28 -0700", "from unknown (HELO localhost.jf.intel.com) ([10.166.244.174])\n\tby fmsmga002.fm.intel.com with ESMTP; 23 Oct 2019 13:25:28 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.68,222,1569308400\"; d=\"scan'208\";a=\"228253695\"", "From": "Tony Nguyen <anthony.l.nguyen@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Wed, 23 Oct 2019 04:55:01 -0700", "Message-Id": "<20191023115501.41055-2-anthony.l.nguyen@intel.com>", "X-Mailer": "git-send-email 2.20.1", "In-Reply-To": "<20191023115501.41055-1-anthony.l.nguyen@intel.com>", "References": "<20191023115501.41055-1-anthony.l.nguyen@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH v3 2/2] ice: Implement peer communications", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "Set and implement operations for the peer device and peer driver to\ncommunicate with each other, via iidc_ops and iidc_peer_ops, to request\nresources and manage event notification.\n\nSigned-off-by: Dave Ertman <david.m.ertman@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\nv3:\n- Defer opening peer if RTNL lock is held to avoid possible deadlock\nv2:\n- Remove version check\n---\n drivers/net/ethernet/intel/ice/ice.h | 2 +\n .../net/ethernet/intel/ice/ice_adminq_cmd.h | 32 +\n drivers/net/ethernet/intel/ice/ice_common.c | 188 ++++\n drivers/net/ethernet/intel/ice/ice_common.h | 9 +\n drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 34 +\n drivers/net/ethernet/intel/ice/ice_idc.c | 875 ++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_idc_int.h | 38 +\n drivers/net/ethernet/intel/ice/ice_lib.c | 34 +-\n drivers/net/ethernet/intel/ice/ice_lib.h | 2 +\n drivers/net/ethernet/intel/ice/ice_main.c | 67 +-\n 
drivers/net/ethernet/intel/ice/ice_sched.c | 69 +-\n drivers/net/ethernet/intel/ice/ice_switch.c | 27 +\n drivers/net/ethernet/intel/ice/ice_switch.h | 4 +\n drivers/net/ethernet/intel/ice/ice_type.h | 3 +\n .../net/ethernet/intel/ice/ice_virtchnl_pf.c | 25 -\n 15 files changed, 1376 insertions(+), 33 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex 7160556ec55e..b8f2a6e26d0f 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -276,6 +276,7 @@ struct ice_vsi {\n \tu16 num_rxq;\t\t\t /* Used Rx queues */\n \tu16 num_rx_desc;\n \tu16 num_tx_desc;\n+\tu16 qset_handle[ICE_MAX_TRAFFIC_CLASS];\n \tstruct ice_tc_cfg tc_cfg;\n } ____cacheline_internodealigned_in_smp;\n \n@@ -458,6 +459,7 @@ struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);\n int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);\n int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);\n void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);\n+int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);\n void ice_print_link_msg(struct ice_vsi *vsi, bool isup);\n int ice_init_peer_devices(struct ice_pf *pf);\n int\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex c54e78492395..dad9a9efadfa 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -1453,6 +1453,36 @@ struct ice_aqc_dis_txq {\n \tstruct ice_aqc_dis_txq_item qgrps[1];\n };\n \n+/* Add Tx RDMA Queue Set (indirect 0x0C33) */\n+struct ice_aqc_add_rdma_qset {\n+\tu8 num_qset_grps;\n+\tu8 reserved[7];\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* This is the descriptor of each qset entry for the Add Tx RDMA Queue Set\n+ * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset.\n+ */\n+struct ice_aqc_add_tx_rdma_qset_entry {\n+\t__le16 tx_qset_id;\n+\tu8 rsvd[2];\n+\t__le32 qset_teid;\n+\tstruct ice_aqc_txsched_elem info;\n+};\n+\n+/* The format of the command buffer for Add Tx RDMA Queue Set(0x0C33)\n+ * is an array of the following structs. 
Please note that the length of\n+ * each struct ice_aqc_add_rdma_qset is variable due to the variable\n+ * number of queues in each group!\n+ */\n+struct ice_aqc_add_rdma_qset_data {\n+\t__le32 parent_teid;\n+\t__le16 num_qsets;\n+\tu8 rsvd[2];\n+\tstruct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[1];\n+};\n+\n /* Configure Firmware Logging Command (indirect 0xFF09)\n * Logging Information Read Response (indirect 0xFF10)\n * Note: The 0xFF10 command has no input parameters.\n@@ -1639,6 +1669,7 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_get_set_rss_key get_set_rss_key;\n \t\tstruct ice_aqc_add_txqs add_txqs;\n \t\tstruct ice_aqc_dis_txqs dis_txqs;\n+\t\tstruct ice_aqc_add_rdma_qset add_rdma_qset;\n \t\tstruct ice_aqc_add_get_update_free_vsi vsi_cmd;\n \t\tstruct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;\n \t\tstruct ice_aqc_fw_logging fw_logging;\n@@ -1768,6 +1799,7 @@ enum ice_adminq_opc {\n \t/* Tx queue handling commands/events */\n \tice_aqc_opc_add_txqs\t\t\t\t= 0x0C30,\n \tice_aqc_opc_dis_txqs\t\t\t\t= 0x0C31,\n+\tice_aqc_opc_add_rdma_qset\t\t\t= 0x0C33,\n \n \t/* package commands */\n \tice_aqc_opc_download_pkg\t\t\t= 0x0C40,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex ed59eec57a52..b96118c43765 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -2923,6 +2923,59 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,\n \treturn status;\n }\n \n+/**\n+ * ice_aq_add_rdma_qsets\n+ * @hw: pointer to the hardware structure\n+ * @num_qset_grps: Number of RDMA Qset groups\n+ * @qset_list: list of qset groups to be added\n+ * @buf_size: size of buffer for indirect command\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Add Tx RDMA Qsets (0x0C33)\n+ */\n+static enum ice_status\n+ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,\n+\t\t struct ice_aqc_add_rdma_qset_data *qset_list,\n+\t\t u16 buf_size, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_add_rdma_qset_data *list;\n+\tu16 i, sum_header_size, sum_q_size = 0;\n+\tstruct ice_aqc_add_rdma_qset *cmd;\n+\tstruct ice_aq_desc desc;\n+\n+\tcmd = &desc.params.add_rdma_qset;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);\n+\n+\tif (!qset_list)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tif (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tsum_header_size = num_qset_grps *\n+\t\t(sizeof(*qset_list) - sizeof(*qset_list->rdma_qsets));\n+\n+\tlist = qset_list;\n+\tfor (i = 0; i < num_qset_grps; i++) {\n+\t\tstruct ice_aqc_add_tx_rdma_qset_entry *qset = list->rdma_qsets;\n+\t\tu16 num_qsets = le16_to_cpu(list->num_qsets);\n+\n+\t\tsum_q_size += num_qsets * sizeof(*qset);\n+\t\tlist = (struct ice_aqc_add_rdma_qset_data *)\n+\t\t\t(qset + num_qsets);\n+\t}\n+\n+\tif (buf_size != (sum_header_size + sum_q_size))\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\n+\tcmd->num_qset_grps = num_qset_grps;\n+\n+\treturn ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);\n+}\n+\n /* End of FW Admin Queue command wrappers */\n \n /**\n@@ -3391,6 +3444,141 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,\n \t\t\t ICE_SCHED_NODE_OWNER_LAN);\n }\n \n+/**\n+ * ice_cfg_vsi_rdma - configure the VSI RDMA queues\n+ * @pi: port information structure\n+ * @vsi_handle: software VSI handle\n+ * @tc_bitmap: TC bitmap\n+ * @max_rdmaqs: max RDMA queues array per TC\n+ *\n+ * This function adds/updates the VSI 
RDMA queues per TC.\n+ */\n+enum ice_status\n+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,\n+\t\t u16 *max_rdmaqs)\n+{\n+\treturn ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,\n+\t\t\t ICE_SCHED_NODE_OWNER_RDMA);\n+}\n+\n+/**\n+ * ice_ena_vsi_rdma_qset\n+ * @pi: port information structure\n+ * @vsi_handle: software VSI handle\n+ * @tc: TC number\n+ * @rdma_qset: pointer to RDMA qset\n+ * @num_qsets: number of RDMA qsets\n+ * @qset_teid: pointer to qset node teids\n+ *\n+ * This function adds RDMA qset\n+ */\n+enum ice_status\n+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)\n+{\n+\tstruct ice_aqc_txsched_elem_data node = { 0 };\n+\tstruct ice_aqc_add_rdma_qset_data *buf;\n+\tstruct ice_sched_node *parent;\n+\tenum ice_status status;\n+\tstruct ice_hw *hw;\n+\tu16 i, buf_size;\n+\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn ICE_ERR_CFG;\n+\thw = pi->hw;\n+\n+\tif (!ice_is_vsi_valid(hw, vsi_handle))\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tbuf_size = sizeof(*buf) + sizeof(*buf->rdma_qsets) * (num_qsets - 1);\n+\tbuf = kzalloc(buf_size, GFP_KERNEL);\n+\tif (!buf)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\tmutex_lock(&pi->sched_lock);\n+\n+\tparent = ice_sched_get_free_qparent(pi, vsi_handle, tc,\n+\t\t\t\t\t ICE_SCHED_NODE_OWNER_RDMA);\n+\tif (!parent) {\n+\t\tstatus = ICE_ERR_PARAM;\n+\t\tgoto rdma_error_exit;\n+\t}\n+\tbuf->parent_teid = parent->info.node_teid;\n+\tnode.parent_teid = parent->info.node_teid;\n+\n+\tbuf->num_qsets = cpu_to_le16(num_qsets);\n+\tfor (i = 0; i < num_qsets; i++) {\n+\t\tbuf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);\n+\t\tbuf->rdma_qsets[i].info.valid_sections =\n+\t\t\t\t\t\tICE_AQC_ELEM_VALID_GENERIC;\n+\t}\n+\tstatus = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);\n+\tif (status) {\n+\t\tice_debug(hw, ICE_DBG_RDMA, \"add RDMA qset failed\\n\");\n+\t\tgoto rdma_error_exit;\n+\t}\n+\tnode.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;\n+\tfor (i = 0; i < num_qsets; i++) {\n+\t\tnode.node_teid = buf->rdma_qsets[i].qset_teid;\n+\t\tstatus = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,\n+\t\t\t\t\t &node);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\tqset_teid[i] = le32_to_cpu(node.node_teid);\n+\t}\n+rdma_error_exit:\n+\tmutex_unlock(&pi->sched_lock);\n+\tkfree(buf);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_dis_vsi_rdma_qset - free RDMA resources\n+ * @pi: port_info struct\n+ * @count: number of RDMA qsets to free\n+ * @qset_teid: TEID of qset node\n+ * @q_id: list of queue IDs being disabled\n+ */\n+enum ice_status\n+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,\n+\t\t u16 *q_id)\n+{\n+\tstruct ice_aqc_dis_txq_item qg_list;\n+\tenum ice_status status = 0;\n+\tu16 qg_size;\n+\tint i;\n+\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn ICE_ERR_CFG;\n+\n+\tqg_size = sizeof(qg_list);\n+\n+\tmutex_lock(&pi->sched_lock);\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tstruct ice_sched_node *node;\n+\n+\t\tnode = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);\n+\t\tif (!node)\n+\t\t\tcontinue;\n+\n+\t\tqg_list.parent_teid = node->info.parent_teid;\n+\t\tqg_list.num_qs = 1;\n+\t\tqg_list.q_id[0] =\n+\t\t\tcpu_to_le16(q_id[i] |\n+\t\t\t\t ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);\n+\n+\t\tstatus = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, qg_size,\n+\t\t\t\t\t ICE_NO_RESET, 0, NULL);\n+\t\tif (status)\n+\t\t\tbreak;\n+\n+\t\tice_free_sched_node(pi, 
node);\n+\t}\n+\n+\tmutex_unlock(&pi->sched_lock);\n+\treturn status;\n+}\n+\n /**\n * ice_replay_pre_init - replay pre initialization\n * @hw: pointer to the HW struct\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex c3df92f57777..347077bf8a36 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -119,6 +119,15 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,\n \t\t struct ice_sq_cd *cd);\n \n enum ice_status\n+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,\n+\t\t u16 *max_rdmaqs);\n+enum ice_status\n+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);\n+enum ice_status\n+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,\n+\t\t u16 *q_id);\n+enum ice_status\n ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,\n \t\tu16 *q_handle, u16 *q_ids, u32 *q_teids,\n \t\tenum ice_disq_rst_src rst_src, u16 vmvf_num,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c\nindex ed639ef5da42..a308f9d2f74b 100644\n--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c\n@@ -148,6 +148,7 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)\n static void ice_pf_dcb_recfg(struct ice_pf *pf)\n {\n \tstruct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;\n+\tstruct iidc_event *event;\n \tu8 tc_map = 0;\n \tint v, ret;\n \n@@ -171,6 +172,36 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)\n \n \t\tice_vsi_map_rings_to_vectors(pf->vsi[v]);\n \t}\n+\tevent = kzalloc(sizeof(*event), GFP_KERNEL);\n+\tif (!event)\n+\t\treturn;\n+\n+\tset_bit(IIDC_EVENT_TC_CHANGE, event->type);\n+\tevent->reporter = NULL;\n+\tice_setup_dcb_qos_info(pf, &event->info.port_qos);\n+\tice_for_each_peer(pf, event, ice_peer_check_for_reg);\n+\tkfree(event);\n+}\n+\n+/**\n+ * ice_peer_prep_tc_change - Pre-notify RDMA Peer in blocking call of TC change\n+ * @peer_dev_int: ptr to peer device internal struct\n+ * @data: ptr to opaque data\n+ */\n+static int\n+ice_peer_prep_tc_change(struct ice_peer_dev_int *peer_dev_int,\n+\t\t\tvoid __always_unused *data)\n+{\n+\tstruct iidc_peer_dev *peer_dev;\n+\n+\tpeer_dev = ice_get_peer_dev(peer_dev_int);\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn 0;\n+\n+\tif (peer_dev->peer_ops && peer_dev->peer_ops->prep_tc_change)\n+\t\tpeer_dev->peer_ops->prep_tc_change(peer_dev);\n+\n+\treturn 0;\n }\n \n /**\n@@ -202,6 +233,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)\n \t\treturn ret;\n \t}\n \n+\t/* Notify capable peers about impending change to TCs */\n+\tice_for_each_peer(pf, NULL, ice_peer_prep_tc_change);\n+\n \t/* Store old config in case FW config fails */\n \told_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);\n \tmemcpy(old_cfg, curr_cfg, sizeof(*old_cfg));\ndiff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c\nindex 5e02321449ec..4038787b5509 100644\n--- a/drivers/net/ethernet/intel/ice/ice_idc.c\n+++ b/drivers/net/ethernet/intel/ice/ice_idc.c\n@@ -159,6 +159,60 @@ ice_peer_state_change(struct ice_peer_dev_int *peer_dev, long new_state,\n \t\tmutex_unlock(&peer_dev->peer_dev_state_mutex);\n }\n \n+/**\n+ * ice_peer_close - close a peer device\n+ * @peer_dev_int: device to close\n+ * @data: 
pointer to opaque data\n+ *\n+ * This function will also set the state bit for the peer to CLOSED. This\n+ * function is meant to be called from a ice_for_each_peer().\n+ */\n+int ice_peer_close(struct ice_peer_dev_int *peer_dev_int, void *data)\n+{\n+\tenum iidc_close_reason reason = *(enum iidc_close_reason *)(data);\n+\tstruct iidc_peer_dev *peer_dev;\n+\tstruct ice_pf *pf;\n+\tint i;\n+\n+\tpeer_dev = ice_get_peer_dev(peer_dev_int);\n+\t/* return 0 so ice_for_each_peer will continue closing other peers */\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn 0;\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\n+\tif (test_bit(__ICE_DOWN, pf->state) ||\n+\t test_bit(__ICE_SUSPENDED, pf->state) ||\n+\t test_bit(__ICE_NEEDS_RESTART, pf->state))\n+\t\treturn 0;\n+\n+\tmutex_lock(&peer_dev_int->peer_dev_state_mutex);\n+\n+\t/* no peer driver, already closed, closing or opening nothing to do */\n+\tif (test_bit(ICE_PEER_DEV_STATE_CLOSED, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_CLOSING, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_OPENING, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_REMOVED, peer_dev_int->state))\n+\t\tgoto peer_close_out;\n+\n+\t/* Set the peer state to CLOSING */\n+\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_CLOSING, true);\n+\n+\tfor (i = 0; i < IIDC_EVENT_NBITS; i++)\n+\t\tbitmap_zero(peer_dev_int->current_events[i].type,\n+\t\t\t IIDC_EVENT_NBITS);\n+\n+\tif (peer_dev->peer_ops && peer_dev->peer_ops->close)\n+\t\tpeer_dev->peer_ops->close(peer_dev, reason);\n+\n+\t/* Set the peer state to CLOSED */\n+\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_CLOSED, true);\n+\n+peer_close_out:\n+\tmutex_unlock(&peer_dev_int->peer_dev_state_mutex);\n+\n+\treturn 0;\n+}\n+\n /**\n * ice_peer_update_vsi - update the pf_vsi info in peer_dev struct\n * @peer_dev_int: pointer to peer dev internal struct\n@@ -177,6 +231,106 @@ int ice_peer_update_vsi(struct ice_peer_dev_int *peer_dev_int, void *data)\n \treturn 0;\n }\n \n+/**\n+ * ice_close_peer_for_reset - queue work to close peer for reset\n+ * @peer_dev_int: pointer peer dev internal struct\n+ * @data: pointer to opaque data used for reset type\n+ */\n+int ice_close_peer_for_reset(struct ice_peer_dev_int *peer_dev_int, void *data)\n+{\n+\tstruct iidc_peer_dev *peer_dev;\n+\tenum ice_reset_req reset;\n+\n+\tpeer_dev = ice_get_peer_dev(peer_dev_int);\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn 0;\n+\n+\treset = *(enum ice_reset_req *)data;\n+\n+\tswitch (reset) {\n+\tcase ICE_RESET_GLOBR:\n+\t\tpeer_dev_int->rst_type = IIDC_REASON_GLOBR_REQ;\n+\t\tbreak;\n+\tcase ICE_RESET_CORER:\n+\t\tpeer_dev_int->rst_type = IIDC_REASON_CORER_REQ;\n+\t\tbreak;\n+\tcase ICE_RESET_PFR:\n+\t\tpeer_dev_int->rst_type = IIDC_REASON_PFR_REQ;\n+\t\tbreak;\n+\tdefault:\n+\t\t/* reset type is invalid */\n+\t\treturn 1;\n+\t}\n+\tqueue_work(peer_dev_int->ice_peer_wq, &peer_dev_int->peer_close_task);\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_check_peer_drv_for_events - check peer_drv for events to report\n+ * @peer_dev: peer device to report to\n+ */\n+static void ice_check_peer_drv_for_events(struct iidc_peer_dev *peer_dev)\n+{\n+\tconst struct iidc_peer_ops *p_ops = peer_dev->peer_ops;\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\tstruct ice_peer_drv_int *peer_drv_int;\n+\tint i;\n+\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\tif (!peer_dev_int)\n+\t\treturn;\n+\tpeer_drv_int = peer_dev_int->peer_drv_int;\n+\n+\tfor_each_set_bit(i, peer_dev_int->events, IIDC_EVENT_NBITS) {\n+\t\tstruct 
iidc_event *curr = &peer_drv_int->current_events[i];\n+\n+\t\tif (!bitmap_empty(curr->type, IIDC_EVENT_NBITS) &&\n+\t\t p_ops->event_handler)\n+\t\t\tp_ops->event_handler(peer_dev, curr);\n+\t}\n+}\n+\n+/**\n+ * ice_check_peer_for_events - check peer_devs for events new peer reg'd for\n+ * @src_peer_int: peer to check for events\n+ * @data: ptr to opaque data, to be used for the peer struct that opened\n+ *\n+ * This function is to be called when a peer device is opened.\n+ *\n+ * Since a new peer opening would have missed any events that would\n+ * have happened before its opening, we need to walk the peers and see\n+ * if any of them have events that the new peer cares about\n+ *\n+ * This function is meant to be called by a device_for_each_child.\n+ */\n+static int\n+ice_check_peer_for_events(struct ice_peer_dev_int *src_peer_int, void *data)\n+{\n+\tstruct iidc_peer_dev *new_peer = (struct iidc_peer_dev *)data;\n+\tconst struct iidc_peer_ops *p_ops = new_peer->peer_ops;\n+\tstruct ice_peer_dev_int *new_peer_int;\n+\tstruct iidc_peer_dev *src_peer;\n+\tint i;\n+\n+\tsrc_peer = ice_get_peer_dev(src_peer_int);\n+\tif (!ice_validate_peer_dev(new_peer) ||\n+\t !ice_validate_peer_dev(src_peer))\n+\t\treturn 0;\n+\n+\tnew_peer_int = peer_to_ice_dev_int(new_peer);\n+\n+\tfor_each_set_bit(i, new_peer_int->events, IIDC_EVENT_NBITS) {\n+\t\tstruct iidc_event *curr = &src_peer_int->current_events[i];\n+\n+\t\tif (!bitmap_empty(curr->type, IIDC_EVENT_NBITS) &&\n+\t\t new_peer->peer_dev_id != src_peer->peer_dev_id &&\n+\t\t p_ops->event_handler)\n+\t\t\tp_ops->event_handler(new_peer, curr);\n+\t}\n+\n+\treturn 0;\n+}\n+\n /**\n * ice_for_each_peer - iterate across and call function for each peer dev\n * @pf: pointer to private board struct\n@@ -207,6 +361,90 @@ ice_for_each_peer(struct ice_pf *pf, void *data,\n \treturn 0;\n }\n \n+/**\n+ * ice_finish_init_peer_device - complete peer device initialization\n+ * @peer_dev_int: ptr to peer device internal struct\n+ * @data: ptr to opaque data\n+ *\n+ * This function completes remaining initialization of peer_devices\n+ */\n+int\n+ice_finish_init_peer_device(struct ice_peer_dev_int *peer_dev_int,\n+\t\t\t void __always_unused *data)\n+{\n+\tstruct iidc_peer_dev *peer_dev;\n+\tstruct iidc_peer_drv *peer_drv;\n+\tstruct ice_pf *pf;\n+\tint ret = 0;\n+\n+\tpeer_dev = ice_get_peer_dev(peer_dev_int);\n+\t/* peer_dev will not always be populated at the time of this check */\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn ret;\n+\n+\tpeer_drv = peer_dev->peer_drv;\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\t/* There will be several assessments of the peer_dev's state in this\n+\t * chunk of logic. 
We need to hold the peer_dev_int's state mutex\n+\t * for the entire part so that the flow progresses without another\n+\t * context changing things mid-flow\n+\t */\n+\tmutex_lock(&peer_dev_int->peer_dev_state_mutex);\n+\n+\tif (!peer_dev->peer_ops) {\n+\t\tdev_err(&pf->pdev->dev, \"peer_ops not defined on peer dev\\n\");\n+\t\tgoto init_unlock;\n+\t}\n+\n+\tif (!peer_dev->peer_ops->open) {\n+\t\tdev_err(&pf->pdev->dev, \"peer_ops:open not defined on peer dev\\n\");\n+\t\tgoto init_unlock;\n+\t}\n+\n+\tif (!peer_dev->peer_ops->close) {\n+\t\tdev_err(&pf->pdev->dev, \"peer_ops:close not defined on peer dev\\n\");\n+\t\tgoto init_unlock;\n+\t}\n+\n+\t/* Peer driver expected to set driver_id during registration */\n+\tif (!peer_drv->driver_id) {\n+\t\tdev_err(&pf->pdev->dev, \"Peer driver did not set driver_id\\n\");\n+\t\tgoto init_unlock;\n+\t}\n+\n+\tif ((test_bit(ICE_PEER_DEV_STATE_CLOSED, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_PROBED, peer_dev_int->state)) &&\n+\t ice_pf_state_is_nominal(pf)) {\n+\t\t/* If the RTNL is locked, we defer opening the peer\n+\t\t * until the next time this function is called by the\n+\t\t * service task.\n+\t\t */\n+\t\tif (rtnl_is_locked())\n+\t\t\tgoto init_unlock;\n+\t\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_OPENING,\n+\t\t\t\t true);\n+\t\tret = peer_dev->peer_ops->open(peer_dev);\n+\t\tif (ret) {\n+\t\t\tdev_err(&pf->pdev->dev, \"Peer %d failed to open\\n\",\n+\t\t\t\tpeer_dev->peer_dev_id);\n+\t\t\tice_peer_state_change(peer_dev_int,\n+\t\t\t\t\t ICE_PEER_DEV_STATE_PROBED, true);\n+\t\t\tgoto init_unlock;\n+\t\t}\n+\n+\t\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_OPENED,\n+\t\t\t\t true);\n+\t\tret = ice_for_each_peer(pf, peer_dev,\n+\t\t\t\t\tice_check_peer_for_events);\n+\t\tice_check_peer_drv_for_events(peer_dev);\n+\t}\n+\n+init_unlock:\n+\tmutex_unlock(&peer_dev_int->peer_dev_state_mutex);\n+\n+\treturn ret;\n+}\n+\n /**\n * ice_unreg_peer_device - unregister specified device\n * @peer_dev_int: ptr to peer device internal\n@@ -287,6 +525,605 @@ ice_unroll_peer(struct ice_peer_dev_int *peer_dev_int,\n \treturn 0;\n }\n \n+/**\n+ * ice_find_vsi - Find the VSI from VSI ID\n+ * @pf: The PF pointer to search in\n+ * @vsi_num: The VSI ID to search for\n+ */\n+static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)\n+{\n+\tint i;\n+\n+\tice_for_each_vsi(pf, i)\n+\t\tif (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)\n+\t\t\treturn pf->vsi[i];\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_peer_alloc_rdma_qsets - Allocate Leaf Nodes for RDMA Qset\n+ * @peer_dev: peer that is requesting the Leaf Nodes\n+ * @res: Resources to be allocated\n+ * @partial_acceptable: If partial allocation is acceptable to the peer\n+ *\n+ * This function allocates Leaf Nodes for given RDMA Qset resources\n+ * for the peer device.\n+ */\n+static int\n+ice_peer_alloc_rdma_qsets(struct iidc_peer_dev *peer_dev, struct iidc_res *res,\n+\t\t\t int __always_unused partial_acceptable)\n+{\n+\tu16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];\n+\tenum ice_status status;\n+\tstruct ice_vsi *vsi;\n+\tstruct ice_pf *pf;\n+\tint i, ret = 0;\n+\tu32 *qset_teid;\n+\tu16 *qs_handle;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\n+\tif (res->cnt_req > ICE_MAX_TXQ_PER_TXQG)\n+\t\treturn -EINVAL;\n+\n+\tqset_teid = kcalloc(res->cnt_req, sizeof(*qset_teid), GFP_KERNEL);\n+\tif (!qset_teid)\n+\t\treturn -ENOMEM;\n+\n+\tqs_handle = kcalloc(res->cnt_req, sizeof(*qs_handle), 
GFP_KERNEL);\n+\tif (!qs_handle) {\n+\t\tkfree(qset_teid);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tice_for_each_traffic_class(i)\n+\t\tmax_rdmaqs[i] = 0;\n+\n+\tfor (i = 0; i < res->cnt_req; i++) {\n+\t\tstruct iidc_rdma_qset_params *qset;\n+\n+\t\tqset = &res->res[i].res.qsets;\n+\t\tif (qset->vsi_id != peer_dev->pf_vsi_num) {\n+\t\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\t\"RDMA QSet invalid VSI requested\\n\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto out;\n+\t\t}\n+\t\tmax_rdmaqs[qset->tc]++;\n+\t\tqs_handle[i] = qset->qs_handle;\n+\t}\n+\n+\tvsi = ice_find_vsi(pf, peer_dev->pf_vsi_num);\n+\tif (!vsi) {\n+\t\tdev_err(&pf->pdev->dev, \"RDMA QSet invalid VSI\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n+\t}\n+\n+\tstatus = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,\n+\t\t\t\t max_rdmaqs);\n+\tif (status) {\n+\t\tdev_err(&pf->pdev->dev, \"Failed VSI RDMA qset config\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n+\t}\n+\n+\tfor (i = 0; i < res->cnt_req; i++) {\n+\t\tstruct iidc_rdma_qset_params *qset;\n+\n+\t\tqset = &res->res[i].res.qsets;\n+\t\tstatus = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx,\n+\t\t\t\t\t qset->tc, &qs_handle[i], 1,\n+\t\t\t\t\t &qset_teid[i]);\n+\t\tif (status) {\n+\t\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\t\"Failed VSI RDMA qset enable\\n\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto out;\n+\t\t}\n+\t\tvsi->qset_handle[qset->tc] = qset->qs_handle;\n+\t\tqset->teid = qset_teid[i];\n+\t}\n+\n+out:\n+\tkfree(qset_teid);\n+\tkfree(qs_handle);\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_free_rdma_qsets - Free leaf nodes for RDMA Qset\n+ * @peer_dev: peer that requested qsets to be freed\n+ * @res: Resource to be freed\n+ */\n+static int\n+ice_peer_free_rdma_qsets(struct iidc_peer_dev *peer_dev, struct iidc_res *res)\n+{\n+\tenum ice_status status;\n+\tint count, i, ret = 0;\n+\tstruct ice_vsi *vsi;\n+\tstruct ice_pf *pf;\n+\tu16 vsi_id;\n+\tu32 *teid;\n+\tu16 *q_id;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\n+\tcount = res->res_allocated;\n+\tif (count > ICE_MAX_TXQ_PER_TXQG)\n+\t\treturn -EINVAL;\n+\n+\tteid = kcalloc(count, sizeof(*teid), GFP_KERNEL);\n+\tif (!teid)\n+\t\treturn -ENOMEM;\n+\n+\tq_id = kcalloc(count, sizeof(*q_id), GFP_KERNEL);\n+\tif (!q_id) {\n+\t\tkfree(teid);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tvsi_id = res->res[0].res.qsets.vsi_id;\n+\tvsi = ice_find_vsi(pf, vsi_id);\n+\tif (!vsi) {\n+\t\tdev_err(&pf->pdev->dev, \"RDMA Invalid VSI\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto rdma_free_out;\n+\t}\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tstruct iidc_rdma_qset_params *qset;\n+\n+\t\tqset = &res->res[i].res.qsets;\n+\t\tif (qset->vsi_id != vsi_id) {\n+\t\t\tdev_err(&pf->pdev->dev, \"RDMA Invalid VSI ID\\n\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto rdma_free_out;\n+\t\t}\n+\t\tq_id[i] = qset->qs_handle;\n+\t\tteid[i] = qset->teid;\n+\n+\t\tvsi->qset_handle[qset->tc] = 0;\n+\t}\n+\n+\tstatus = ice_dis_vsi_rdma_qset(vsi->port_info, count, teid, q_id);\n+\tif (status)\n+\t\tret = -EINVAL;\n+\n+rdma_free_out:\n+\tkfree(teid);\n+\tkfree(q_id);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_alloc_res - Allocate requested resources for peer device\n+ * @peer_dev: peer that is requesting resources\n+ * @res: Resources to be allocated\n+ * @partial_acceptable: If partial allocation is acceptable to the peer\n+ *\n+ * This function allocates requested resources for the peer device.\n+ */\n+static int\n+ice_peer_alloc_res(struct iidc_peer_dev *peer_dev, struct iidc_res *res,\n+\t\t int 
partial_acceptable)\n+{\n+\tstruct ice_pf *pf;\n+\tint ret;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tif (!ice_pf_state_is_nominal(pf))\n+\t\treturn -EBUSY;\n+\n+\tswitch (res->res_type) {\n+\tcase IIDC_RDMA_QSETS_TXSCHED:\n+\t\tret = ice_peer_alloc_rdma_qsets(peer_dev, res,\n+\t\t\t\t\t\tpartial_acceptable);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_free_res - Free given resources\n+ * @peer_dev: peer that is requesting freeing of resources\n+ * @res: Resources to be freed\n+ *\n+ * Free/Release resources allocated to given peer device.\n+ */\n+static int\n+ice_peer_free_res(struct iidc_peer_dev *peer_dev, struct iidc_res *res)\n+{\n+\tint ret;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tswitch (res->res_type) {\n+\tcase IIDC_RDMA_QSETS_TXSCHED:\n+\t\tret = ice_peer_free_rdma_qsets(peer_dev, res);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_reg_for_notif - register a peer to receive specific notifications\n+ * @peer_dev: peer that is registering for event notifications\n+ * @events: mask of event types peer is registering for\n+ */\n+static void\n+ice_peer_reg_for_notif(struct iidc_peer_dev *peer_dev,\n+\t\t struct iidc_event *events)\n+{\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\tstruct ice_pf *pf;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !events)\n+\t\treturn;\n+\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\n+\tbitmap_or(peer_dev_int->events, peer_dev_int->events, events->type,\n+\t\t IIDC_EVENT_NBITS);\n+\n+\t/* Check to see if any events happened previous to peer registering */\n+\tice_for_each_peer(pf, peer_dev, ice_check_peer_for_events);\n+\tice_check_peer_drv_for_events(peer_dev);\n+}\n+\n+/**\n+ * ice_peer_unreg_for_notif - unreg a peer from receiving certain notifications\n+ * @peer_dev: peer that is unregistering from event notifications\n+ * @events: mask of event types peer is unregistering for\n+ */\n+static void\n+ice_peer_unreg_for_notif(struct iidc_peer_dev *peer_dev,\n+\t\t\t struct iidc_event *events)\n+{\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !events)\n+\t\treturn;\n+\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\n+\tbitmap_andnot(peer_dev_int->events, peer_dev_int->events, events->type,\n+\t\t IIDC_EVENT_NBITS);\n+}\n+\n+/**\n+ * ice_peer_check_for_reg - check to see if any peers are reg'd for event\n+ * @peer_dev_int: ptr to peer device internal struct\n+ * @data: ptr to opaque data, to be used for ice_event to report\n+ *\n+ * This function is to be called by device_for_each_child to handle an\n+ * event reported by a peer or the ice driver.\n+ */\n+int ice_peer_check_for_reg(struct ice_peer_dev_int *peer_dev_int, void *data)\n+{\n+\tstruct iidc_event *event = (struct iidc_event *)data;\n+\tDECLARE_BITMAP(comp_events, IIDC_EVENT_NBITS);\n+\tstruct iidc_peer_dev *peer_dev;\n+\tbool check = true;\n+\n+\tpeer_dev = ice_get_peer_dev(peer_dev_int);\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !data)\n+\t/* If invalid dev, in this case return 0 instead of error\n+\t * because caller ignores this return value\n+\t */\n+\t\treturn 0;\n+\n+\tif (event->reporter)\n+\t\tcheck = event->reporter->peer_dev_id != peer_dev->peer_dev_id;\n+\n+\tif (bitmap_and(comp_events, event->type, 
peer_dev_int->events,\n+\t\t IIDC_EVENT_NBITS) &&\n+\t (test_bit(ICE_PEER_DEV_STATE_OPENED, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_PREP_RST, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_PREPPED, peer_dev_int->state)) &&\n+\t check &&\n+\t peer_dev->peer_ops->event_handler)\n+\t\tpeer_dev->peer_ops->event_handler(peer_dev, event);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_peer_report_state_change - accept report of a peer state change\n+ * @peer_dev: peer that is sending notification about state change\n+ * @event: ice_event holding info on what the state change is\n+ *\n+ * We also need to parse the list of peers to see if anyone is registered\n+ * for notifications about this state change event, and if so, notify them.\n+ */\n+static void\n+ice_peer_report_state_change(struct iidc_peer_dev *peer_dev,\n+\t\t\t struct iidc_event *event)\n+{\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\tstruct ice_peer_drv_int *peer_drv_int;\n+\tint e_type, drv_event = 0;\n+\tstruct ice_pf *pf;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !event)\n+\t\treturn;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\tpeer_drv_int = peer_dev_int->peer_drv_int;\n+\n+\te_type = find_first_bit(event->type, IIDC_EVENT_NBITS);\n+\tif (!e_type)\n+\t\treturn;\n+\n+\tswitch (e_type) {\n+\t/* Check for peer_drv events */\n+\tcase IIDC_EVENT_MBX_CHANGE:\n+\t\tdrv_event = 1;\n+\t\tif (event->info.mbx_rdy)\n+\t\t\tset_bit(ICE_PEER_DRV_STATE_MBX_RDY,\n+\t\t\t\tpeer_drv_int->state);\n+\t\telse\n+\t\t\tclear_bit(ICE_PEER_DRV_STATE_MBX_RDY,\n+\t\t\t\t peer_drv_int->state);\n+\t\tbreak;\n+\n+\t/* Check for peer_dev events */\n+\tcase IIDC_EVENT_API_CHANGE:\n+\t\tif (event->info.api_rdy)\n+\t\t\tset_bit(ICE_PEER_DEV_STATE_API_RDY,\n+\t\t\t\tpeer_dev_int->state);\n+\t\telse\n+\t\t\tclear_bit(ICE_PEER_DEV_STATE_API_RDY,\n+\t\t\t\t peer_dev_int->state);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\treturn;\n+\t}\n+\n+\t/* store the event and state to notify any new peers opening */\n+\tif (drv_event)\n+\t\tmemcpy(&peer_drv_int->current_events[e_type], event,\n+\t\t sizeof(*event));\n+\telse\n+\t\tmemcpy(&peer_dev_int->current_events[e_type], event,\n+\t\t sizeof(*event));\n+\n+\tice_for_each_peer(pf, event, ice_peer_check_for_reg);\n+}\n+\n+/**\n+ * ice_peer_unregister - request to unregister peer\n+ * @peer_dev: peer device\n+ *\n+ * This function triggers close/remove on peer_dev allowing peer\n+ * to unregister.\n+ */\n+static int ice_peer_unregister(struct iidc_peer_dev *peer_dev)\n+{\n+\tenum iidc_close_reason reason = IIDC_REASON_PEER_DEV_UNINIT;\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\tstruct ice_pf *pf;\n+\tint ret;\n+\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tif (ice_is_reset_in_progress(pf->state))\n+\t\treturn -EBUSY;\n+\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\n+\tret = ice_peer_close(peer_dev_int, &reason);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tpeer_dev->peer_ops = NULL;\n+\n+\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_REMOVED, false);\n+\n+\tmodule_put(THIS_MODULE);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_peer_register - Called by peer to open communication with LAN\n+ * @peer_dev: ptr to peer device\n+ *\n+ * registering peer is expected to populate the ice_peerdrv->name field\n+ * before calling this function.\n+ */\n+static int ice_peer_register(struct iidc_peer_dev *peer_dev)\n+{\n+\tstruct ice_peer_drv_int *peer_drv_int;\n+\tstruct ice_peer_dev_int 
*peer_dev_int;\n+\tstruct iidc_peer_drv *peer_drv;\n+\n+\tif (!peer_dev) {\n+\t\tpr_err(\"Failed to reg peer dev: peer_dev ptr NULL\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!peer_dev->pdev) {\n+\t\tpr_err(\"Failed to reg peer dev: peer dev pdev NULL\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!peer_dev->peer_ops || !peer_dev->ops) {\n+\t\tpr_err(\"Failed to reg peer dev: peer dev peer_ops/ops NULL\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tpeer_drv = peer_dev->peer_drv;\n+\tif (!peer_drv) {\n+\t\tpr_err(\"Failed to reg peer dev: peer drv NULL\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\tpeer_drv_int = peer_dev_int->peer_drv_int;\n+\tif (!peer_drv_int) {\n+\t\tpr_err(\"Failed to match peer_drv_int to peer_dev\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tpeer_drv_int->peer_drv = peer_drv;\n+\n+\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_PROBED, false);\n+\n+\tif (!try_module_get(THIS_MODULE)) {\n+\t\tpr_err(\"Failed to increment module use count\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_peer_request_reset - accept request from peer to perform a reset\n+ * @peer_dev: peer device that is request a reset\n+ * @reset_type: type of reset the peer is requesting\n+ */\n+static int\n+ice_peer_request_reset(struct iidc_peer_dev *peer_dev,\n+\t\t enum iidc_peer_reset_type reset_type)\n+{\n+\tenum ice_reset_req reset;\n+\tstruct ice_pf *pf;\n+\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\n+\tswitch (reset_type) {\n+\tcase IIDC_PEER_PFR:\n+\t\treset = ICE_RESET_PFR;\n+\t\tbreak;\n+\tcase IIDC_PEER_CORER:\n+\t\treset = ICE_RESET_CORER;\n+\t\tbreak;\n+\tcase IIDC_PEER_GLOBR:\n+\t\treset = ICE_RESET_GLOBR;\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_err(&pf->pdev->dev, \"incorrect reset request from peer\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn ice_schedule_reset(pf, reset);\n+}\n+\n+/**\n+ * ice_peer_is_vsi_ready - query if VSI in nominal state\n+ * @peer_dev: pointer to iidc_peer_dev struct\n+ */\n+static int ice_peer_is_vsi_ready(struct iidc_peer_dev *peer_dev)\n+{\n+\tDECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };\n+\tstruct ice_netdev_priv *np;\n+\tstruct ice_vsi *vsi;\n+\n+\t/* If the peer_dev or associated values are not valid, then return\n+\t * 0 as there is no ready port associated with the values passed in\n+\t * as parameters.\n+\t */\n+\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn 0;\n+\n+\tif (!peer_dev->netdev)\n+\t\treturn 0;\n+\n+\tnp = netdev_priv(peer_dev->netdev);\n+\tvsi = np->vsi;\n+\tif (!vsi)\n+\t\treturn 0;\n+\n+\tbitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);\n+\tif (bitmap_intersects(vsi->state, check_bits, __ICE_STATE_NBITS))\n+\t\treturn 0;\n+\n+\treturn 1;\n+}\n+\n+/**\n+ * ice_peer_update_vsi_filter - update main VSI filters for RDMA\n+ * @peer_dev: pointer to RDMA peer device\n+ * @filter: selection of filters to enable or disable\n+ * @enable: bool whether to enable or disable filters\n+ */\n+static int\n+ice_peer_update_vsi_filter(struct iidc_peer_dev *peer_dev,\n+\t\t\t enum iidc_rdma_filter __always_unused filter,\n+\t\t\t bool enable)\n+{\n+\tstruct ice_vsi *vsi;\n+\tstruct ice_pf *pf;\n+\tint ret;\n+\n+\tif (!ice_validate_peer_dev(peer_dev))\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\n+\tvsi = ice_get_main_vsi(pf);\n+\tif (!vsi)\n+\t\treturn -EINVAL;\n+\n+\tret = ice_cfg_iwarp_fltr(&pf->hw, vsi->idx, enable);\n+\n+\tif (ret) 
{\n+\t\tdev_err(&pf->pdev->dev, \"Failed to %sable iWARP filtering\\n\",\n+\t\t\tenable ? \"en\" : \"dis\");\n+\t} else {\n+\t\tif (enable)\n+\t\t\tvsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;\n+\t\telse\n+\t\t\tvsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/* Initialize the ice_ops struct, which is used in 'ice_init_peer_devices' */\n+static const struct iidc_ops ops = {\n+\t.alloc_res\t\t\t= ice_peer_alloc_res,\n+\t.free_res\t\t\t= ice_peer_free_res,\n+\t.is_vsi_ready\t\t\t= ice_peer_is_vsi_ready,\n+\t.reg_for_notification\t\t= ice_peer_reg_for_notif,\n+\t.unreg_for_notification\t\t= ice_peer_unreg_for_notif,\n+\t.notify_state_change\t\t= ice_peer_report_state_change,\n+\t.request_reset\t\t\t= ice_peer_request_reset,\n+\t.peer_register\t\t\t= ice_peer_register,\n+\t.peer_unregister\t\t= ice_peer_unregister,\n+\t.update_vsi_filter\t\t= ice_peer_update_vsi_filter,\n+};\n+\n /**\n * ice_reserve_peer_qvector - Reserve vector resources for peer drivers\n * @pf: board private structure to initialize\n@@ -306,6 +1143,41 @@ static int ice_reserve_peer_qvector(struct ice_pf *pf)\n \treturn 0;\n }\n \n+/**\n+ * ice_peer_close_task - call peer's close asynchronously\n+ * @work: pointer to work_struct contained by the peer_dev_int struct\n+ *\n+ * This method (asynchronous) of calling a peer's close function is\n+ * meant to be used in the reset path.\n+ */\n+static void ice_peer_close_task(struct work_struct *work)\n+{\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\tstruct iidc_peer_dev *peer_dev;\n+\n+\tpeer_dev_int = container_of(work, struct ice_peer_dev_int,\n+\t\t\t\t peer_close_task);\n+\n+\tpeer_dev = ice_get_peer_dev(peer_dev_int);\n+\tif (!peer_dev || !peer_dev->peer_ops)\n+\t\treturn;\n+\n+\t/* If this peer_dev is going to close, we do not want any state changes\n+\t * to happen until after we successfully finish or abort the close.\n+\t * Grab the peer_dev_state_mutex to protect this flow\n+\t */\n+\tmutex_lock(&peer_dev_int->peer_dev_state_mutex);\n+\n+\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_CLOSING, true);\n+\n+\tif (peer_dev->peer_ops->close)\n+\t\tpeer_dev->peer_ops->close(peer_dev, peer_dev_int->rst_type);\n+\n+\tice_peer_state_change(peer_dev_int, ICE_PEER_DEV_STATE_CLOSED, true);\n+\n+\tmutex_unlock(&peer_dev_int->peer_dev_state_mutex);\n+}\n+\n /**\n * ice_init_peer_devices - initializes peer devices\n * @pf: ptr to ice_pf\n@@ -375,6 +1247,7 @@ int ice_init_peer_devices(struct ice_pf *pf)\n \t\t\t\t\t\ti);\n \t\tif (!peer_dev_int->ice_peer_wq)\n \t\t\treturn -ENOMEM;\n+\t\tINIT_WORK(&peer_dev_int->peer_close_task, ice_peer_close_task);\n \n \t\tpeer_dev->pdev = pdev;\n \t\tqos_info = &peer_dev->initial_qos_info;\n@@ -392,6 +1265,8 @@ int ice_init_peer_devices(struct ice_pf *pf)\n \n \t\t/* for DCB, override the qos_info defaults. 
*/\n \t\tice_setup_dcb_qos_info(pf, qos_info);\n+\t\t/* Initialize ice_ops */\n+\t\tpeer_dev->ops = &ops;\n \n \t\t/* make sure peer specific resources such as msix_count and\n \t\t * msix_entries are initialized\ndiff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h\nindex 26ecd45faf16..e1d50a027e5c 100644\n--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h\n+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h\n@@ -67,8 +67,20 @@ struct ice_peer_dev_int {\n };\n \n int ice_peer_update_vsi(struct ice_peer_dev_int *peer_dev_int, void *data);\n+int ice_close_peer_for_reset(struct ice_peer_dev_int *peer_dev_int, void *data);\n int ice_unroll_peer(struct ice_peer_dev_int *peer_dev_int, void *data);\n int ice_unreg_peer_device(struct ice_peer_dev_int *peer_dev_int, void *data);\n+int ice_peer_close(struct ice_peer_dev_int *peer_dev_int, void *data);\n+int ice_peer_check_for_reg(struct ice_peer_dev_int *peer_dev_int, void *data);\n+int\n+ice_finish_init_peer_device(struct ice_peer_dev_int *peer_dev_int, void *data);\n+\n+static inline struct\n+ice_peer_dev_int *peer_to_ice_dev_int(struct iidc_peer_dev *peer_dev)\n+{\n+\treturn peer_dev ? container_of(peer_dev, struct ice_peer_dev_int,\n+\t\t\t\t peer_dev) : NULL;\n+}\n \n static inline struct\n iidc_peer_dev *ice_get_peer_dev(struct ice_peer_dev_int *peer_dev_int)\n@@ -78,4 +90,30 @@ iidc_peer_dev *ice_get_peer_dev(struct ice_peer_dev_int *peer_dev_int)\n \telse\n \t\treturn NULL;\n }\n+\n+static inline bool ice_validate_peer_dev(struct iidc_peer_dev *peer_dev)\n+{\n+\tstruct ice_peer_dev_int *peer_dev_int;\n+\tstruct ice_pf *pf;\n+\n+\tif (!peer_dev || !peer_dev->pdev)\n+\t\treturn false;\n+\n+\tif (!peer_dev->peer_ops)\n+\t\treturn false;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tif (!pf)\n+\t\treturn false;\n+\n+\tpeer_dev_int = peer_to_ice_dev_int(peer_dev);\n+\tif (!peer_dev_int)\n+\t\treturn false;\n+\n+\tif (test_bit(ICE_PEER_DEV_STATE_REMOVED, peer_dev_int->state) ||\n+\t test_bit(ICE_PEER_DEV_STATE_INIT, peer_dev_int->state))\n+\t\treturn false;\n+\n+\treturn true;\n+}\n #endif /* !_ICE_IDC_INT_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c\nindex d3d317df34f0..6d191711caee 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.c\n@@ -1537,6 +1537,30 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,\n \treturn 0;\n }\n \n+/**\n+ * ice_pf_state_is_nominal - checks the PF for nominal state\n+ * @pf: pointer to PF to check\n+ *\n+ * Check the PF's state for a collection of bits that would indicate\n+ * the PF is in a state that would inhibit normal operation for\n+ * driver functionality.\n+ *\n+ * Returns true if PF is in a nominal state, false otherwise\n+ */\n+bool ice_pf_state_is_nominal(struct ice_pf *pf)\n+{\n+\tDECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };\n+\n+\tif (!pf)\n+\t\treturn false;\n+\n+\tbitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);\n+\tif (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))\n+\t\treturn false;\n+\n+\treturn true;\n+}\n+\n /**\n * ice_update_eth_stats - Update VSI-specific ethernet statistics counters\n * @vsi: the VSI to be updated\n@@ -2792,9 +2816,17 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi)\n */\n void ice_vsi_close(struct ice_vsi *vsi)\n {\n+\tenum iidc_close_reason reason = IIDC_REASON_INTERFACE_DOWN;\n+\tstruct device *dev = &vsi->back->pdev->dev;\n+\tint ret = 0;\n+\n+\tif 
(vsi->type == ICE_VSI_PF)\n+\t\tret = ice_for_each_peer(vsi->back, &reason, ice_peer_close);\n+\n+\tif (ret)\n+\t\tdev_dbg(dev, \"Peer device did not implement close function\\n\");\n \tif (!test_and_set_bit(__ICE_DOWN, vsi->state))\n \t\tice_down(vsi);\n-\n \tice_vsi_free_irq(vsi);\n \tice_vsi_free_tx_rings(vsi);\n \tice_vsi_free_rx_rings(vsi);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h\nindex 613a732ceb09..e4ddd315b1b2 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.h\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.h\n@@ -25,6 +25,8 @@ ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,\n \n void ice_free_fltr_list(struct device *dev, struct list_head *h);\n \n+bool ice_pf_state_is_nominal(struct ice_pf *pf);\n+\n void ice_update_eth_stats(struct ice_vsi *vsi);\n \n int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex cfdfcd44735f..2d7af5e6f688 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -586,6 +586,9 @@ static void ice_reset_subtask(struct ice_pf *pf)\n \t\t/* return if no valid reset type requested */\n \t\tif (reset_type == ICE_RESET_INVAL)\n \t\t\treturn;\n+\t\tif (ice_is_peer_ena(pf))\n+\t\t\tice_for_each_peer(pf, &reset_type,\n+\t\t\t\t\t ice_close_peer_for_reset);\n \t\tice_prepare_for_reset(pf);\n \n \t\t/* make sure we are ready to rebuild */\n@@ -1516,6 +1519,9 @@ static void ice_service_task(struct work_struct *work)\n \t\treturn;\n \t}\n \n+\t/* Invoke remaining initialization of peer devices */\n+\tice_for_each_peer(pf, NULL, ice_finish_init_peer_device);\n+\n \tice_process_vflr_event(pf);\n \tice_clean_mailboxq_subtask(pf);\n \n@@ -1550,6 +1556,42 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)\n \thw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;\n }\n \n+/**\n+ * ice_schedule_reset - schedule a reset\n+ * @pf: board private structure\n+ * @reset: reset being requested\n+ */\n+int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)\n+{\n+\t/* bail out if earlier reset has failed */\n+\tif (test_bit(__ICE_RESET_FAILED, pf->state)) {\n+\t\tdev_dbg(&pf->pdev->dev, \"earlier reset has failed\\n\");\n+\t\treturn -EIO;\n+\t}\n+\t/* bail if reset/recovery already in progress */\n+\tif (ice_is_reset_in_progress(pf->state)) {\n+\t\tdev_dbg(&pf->pdev->dev, \"Reset already in progress\\n\");\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tswitch (reset) {\n+\tcase ICE_RESET_PFR:\n+\t\tset_bit(__ICE_PFR_REQ, pf->state);\n+\t\tbreak;\n+\tcase ICE_RESET_CORER:\n+\t\tset_bit(__ICE_CORER_REQ, pf->state);\n+\t\tbreak;\n+\tcase ICE_RESET_GLOBR:\n+\t\tset_bit(__ICE_GLOBR_REQ, pf->state);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tice_service_task_schedule(pf);\n+\treturn 0;\n+}\n+\n /**\n * ice_irq_affinity_notify - Callback for affinity changes\n * @notify: context as to what irq was changed\n@@ -3052,6 +3094,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)\n static void ice_remove(struct pci_dev *pdev)\n {\n \tstruct ice_pf *pf = pci_get_drvdata(pdev);\n+\tenum iidc_close_reason reason;\n \tint i;\n \n \tif (!pf)\n@@ -3063,13 +3106,21 @@ static void ice_remove(struct pci_dev *pdev)\n \t\tmsleep(100);\n \t}\n \n-\tset_bit(__ICE_DOWN, pf->state);\n \tice_service_task_stop(pf);\n+\tif (ice_is_peer_ena(pf)) {\n+\t\treason = IIDC_REASON_INTERFACE_DOWN;\n+\t\tice_for_each_peer(pf, &reason, 
ice_peer_close);\n+\t}\n+\tset_bit(__ICE_DOWN, pf->state);\n \n \tif (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))\n \t\tice_free_vfs(pf);\n \tice_vsi_release_all(pf);\n-\tice_for_each_peer(pf, NULL, ice_unreg_peer_device);\n+\tif (ice_is_peer_ena(pf)) {\n+\t\tida_simple_remove(&ice_peer_index_ida, pf->peer_idx);\n+\t\tice_for_each_peer(pf, NULL, ice_unreg_peer_device);\n+\t\tdevm_kfree(&pdev->dev, pf->peers);\n+\t}\n \tice_free_irq_msix_misc(pf);\n \tice_for_each_vsi(pf, i) {\n \t\tif (!pf->vsi[i])\n@@ -4406,6 +4457,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)\n \tstruct ice_netdev_priv *np = netdev_priv(netdev);\n \tstruct ice_vsi *vsi = np->vsi;\n \tstruct ice_pf *pf = vsi->back;\n+\tstruct iidc_event *event;\n \tu8 count = 0;\n \n \tif (new_mtu == netdev->mtu) {\n@@ -4457,6 +4509,17 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)\n \t\t}\n \t}\n \n+\tif (ice_is_safe_mode(pf))\n+\t\tgoto out;\n+\n+\tevent = kzalloc(sizeof(*event), GFP_KERNEL);\n+\tset_bit(IIDC_EVENT_MTU_CHANGE, event->type);\n+\tevent->reporter = NULL;\n+\tevent->info.mtu = new_mtu;\n+\tice_for_each_peer(pf, event, ice_peer_check_for_reg);\n+\tkfree(event);\n+\n+out:\n \tnetdev_info(netdev, \"changed MTU to %d\\n\", new_mtu);\n \treturn 0;\n }\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c\nindex fc624b73d05d..2012e33214f5 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.c\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.c\n@@ -556,6 +556,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)\n \treturn 0;\n }\n \n+/**\n+ * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC\n+ * @hw: pointer to the HW struct\n+ * @vsi_handle: VSI handle\n+ * @tc: TC number\n+ * @new_numqs: number of queues\n+ */\n+static enum ice_status\n+ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)\n+{\n+\tstruct ice_vsi_ctx *vsi_ctx;\n+\tstruct ice_q_ctx *q_ctx;\n+\n+\tvsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);\n+\tif (!vsi_ctx)\n+\t\treturn ICE_ERR_PARAM;\n+\t/* allocate RDMA queue contexts */\n+\tif (!vsi_ctx->rdma_q_ctx[tc]) {\n+\t\tvsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),\n+\t\t\t\t\t\t new_numqs,\n+\t\t\t\t\t\t sizeof(*q_ctx),\n+\t\t\t\t\t\t GFP_KERNEL);\n+\t\tif (!vsi_ctx->rdma_q_ctx[tc])\n+\t\t\treturn ICE_ERR_NO_MEMORY;\n+\t\tvsi_ctx->num_rdma_q_entries[tc] = new_numqs;\n+\t\treturn 0;\n+\t}\n+\t/* num queues are increased, update the queue contexts */\n+\tif (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {\n+\t\tu16 prev_num = vsi_ctx->num_rdma_q_entries[tc];\n+\n+\t\tq_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,\n+\t\t\t\t sizeof(*q_ctx), GFP_KERNEL);\n+\t\tif (!q_ctx)\n+\t\t\treturn ICE_ERR_NO_MEMORY;\n+\t\tmemcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],\n+\t\t prev_num * sizeof(*q_ctx));\n+\t\tdevm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);\n+\t\tvsi_ctx->rdma_q_ctx[tc] = q_ctx;\n+\t\tvsi_ctx->num_rdma_q_entries[tc] = new_numqs;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n * ice_sched_clear_agg - clears the aggregator related information\n * @hw: pointer to the hardware structure\n@@ -1432,13 +1476,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,\n \tif (!vsi_ctx)\n \t\treturn ICE_ERR_PARAM;\n \n-\tprev_numqs = vsi_ctx->sched.max_lanq[tc];\n+\tif (owner == ICE_SCHED_NODE_OWNER_LAN)\n+\t\tprev_numqs = vsi_ctx->sched.max_lanq[tc];\n+\telse\n+\t\tprev_numqs = vsi_ctx->sched.max_rdmaq[tc];\n \t/* num queues are not 
changed or less than the previous number */\n \tif (new_numqs <= prev_numqs)\n \t\treturn status;\n-\tstatus = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);\n-\tif (status)\n-\t\treturn status;\n+\tif (owner == ICE_SCHED_NODE_OWNER_LAN) {\n+\t\tstatus = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t} else {\n+\t\tstatus = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n \n \tif (new_numqs)\n \t\tice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);\n@@ -1453,7 +1506,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,\n \t\t\t\t\t new_num_nodes, owner);\n \tif (status)\n \t\treturn status;\n-\tvsi_ctx->sched.max_lanq[tc] = new_numqs;\n+\tif (owner == ICE_SCHED_NODE_OWNER_LAN)\n+\t\tvsi_ctx->sched.max_lanq[tc] = new_numqs;\n+\telse\n+\t\tvsi_ctx->sched.max_rdmaq[tc] = new_numqs;\n \n \treturn 0;\n }\n@@ -1519,6 +1575,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,\n \t\t * recreate the child nodes all the time in these cases.\n \t\t */\n \t\tvsi_ctx->sched.max_lanq[tc] = 0;\n+\t\tvsi_ctx->sched.max_rdmaq[tc] = 0;\n \t}\n \n \t/* update the VSI child nodes */\n@@ -1650,6 +1707,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)\n \t\t}\n \t\tif (owner == ICE_SCHED_NODE_OWNER_LAN)\n \t\t\tvsi_ctx->sched.max_lanq[i] = 0;\n+\t\telse\n+\t\t\tvsi_ctx->sched.max_rdmaq[i] = 0;\n \t}\n \tstatus = 0;\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c\nindex 1acdd43a2edd..1d055a62b842 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.c\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.c\n@@ -346,6 +346,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)\n \t\t\tdevm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);\n \t\t\tvsi->lan_q_ctx[i] = NULL;\n \t\t}\n+\t\tif (vsi->rdma_q_ctx[i]) {\n+\t\t\tdevm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);\n+\t\t\tvsi->rdma_q_ctx[i] = NULL;\n+\t\t}\n \t}\n }\n \n@@ -467,6 +471,29 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,\n \treturn ice_aq_update_vsi(hw, vsi_ctx, cd);\n }\n \n+/**\n+ * ice_cfg_iwarp_fltr - enable/disable iWARP filtering on VSI\n+ * @hw: pointer to HW struct\n+ * @vsi_handle: VSI SW index\n+ * @enable: boolean for enable/disable\n+ */\n+enum ice_status\n+ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)\n+{\n+\tstruct ice_vsi_ctx *ctx;\n+\n+\tctx = ice_get_vsi_ctx(hw, vsi_handle);\n+\tif (!ctx)\n+\t\treturn ICE_ERR_DOES_NOT_EXIST;\n+\n+\tif (enable)\n+\t\tctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;\n+\telse\n+\t\tctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;\n+\n+\treturn ice_update_vsi(hw, vsi_handle, ctx, NULL);\n+}\n+\n /**\n * ice_aq_alloc_free_vsi_list\n * @hw: pointer to the HW struct\ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h\nindex cb123fbe30be..a81a9dd509d7 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.h\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.h\n@@ -31,6 +31,8 @@ struct ice_vsi_ctx {\n \tu8 vf_num;\n \tu16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];\n \tstruct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];\n+\tu16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];\n+\tstruct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];\n };\n \n enum ice_sw_fwd_act_type {\n@@ -225,6 +227,8 @@ void ice_remove_vsi_fltr(struct ice_hw 
*hw, u16 vsi_handle);\n enum ice_status\n ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);\n enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);\n+enum ice_status\n+ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);\n \n /* Promisc/defport setup for VSIs */\n enum ice_status\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex d3e44a220d5d..53fcb9b18e78 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -32,6 +32,7 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)\n #define ICE_DBG_LAN\t\tBIT_ULL(8)\n #define ICE_DBG_SW\t\tBIT_ULL(13)\n #define ICE_DBG_SCHED\t\tBIT_ULL(14)\n+#define ICE_DBG_RDMA\t\tBIT_ULL(15)\n #define ICE_DBG_PKG\t\tBIT_ULL(16)\n #define ICE_DBG_RES\t\tBIT_ULL(17)\n #define ICE_DBG_AQ_MSG\t\tBIT_ULL(24)\n@@ -257,6 +258,7 @@ struct ice_sched_node {\n \tu8 tc_num;\n \tu8 owner;\n #define ICE_SCHED_NODE_OWNER_LAN\t0\n+#define ICE_SCHED_NODE_OWNER_RDMA\t2\n };\n \n /* Access Macros for Tx Sched Elements data */\n@@ -282,6 +284,7 @@ struct ice_sched_vsi_info {\n \tstruct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];\n \tstruct list_head list_entry;\n \tu16 max_lanq[ICE_MAX_TRAFFIC_CLASS];\n+\tu16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];\n };\n \n /* driver defines the policy */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c\nindex b45797f39b2f..284b24a51a76 100644\n--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c\n+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c\n@@ -1364,31 +1364,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)\n \treturn ret;\n }\n \n-/**\n- * ice_pf_state_is_nominal - checks the PF for nominal state\n- * @pf: pointer to PF to check\n- *\n- * Check the PF's state for a collection of bits that would indicate\n- * the PF is in a state that would inhibit normal operation for\n- * driver functionality.\n- *\n- * Returns true if PF is in a nominal state.\n- * Returns false otherwise\n- */\n-static bool ice_pf_state_is_nominal(struct ice_pf *pf)\n-{\n-\tDECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };\n-\n-\tif (!pf)\n-\t\treturn false;\n-\n-\tbitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);\n-\tif (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))\n-\t\treturn false;\n-\n-\treturn true;\n-}\n-\n /**\n * ice_pci_sriov_ena - Enable or change number of VFs\n * @pf: pointer to the PF structure\n", "prefixes": [ "v3", "2/2" ] }
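The `patch:` and `put:` operations listed at the top write to this same endpoint. Below is a minimal sketch of changing the patch state with an authenticated PATCH request, under a few stated assumptions: the token value is a placeholder, the `Authorization: Token <key>` header follows Patchwork's token authentication scheme, writes require maintainer rights on the project, and "superseded" is one of Patchwork's standard patch states:

```python
import requests

# Placeholder API token; a real one comes from the Patchwork user profile.
TOKEN = "0123456789abcdef"

url = "http://patchwork.ozlabs.org/api/patches/1182470/"
headers = {"Authorization": f"Token {TOKEN}"}

# PATCH sends only the fields being changed, e.g. marking this
# "changes-requested" patch as superseded once a new revision is posted.
resp = requests.patch(url, headers=headers, json={"state": "superseded"})
resp.raise_for_status()
print(resp.json()["state"])  # superseded
```

A PUT to the same URL would instead replace the writable representation as a whole, so PATCH is the safer choice for single-field changes like state transitions.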