Patch Detail
Supported methods:

GET: Show a patch.
PATCH: Update a patch (partial update; only the fields supplied are changed).
PUT: Update a patch (full update).
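
The two update methods require write access. As a minimal sketch (not taken from this page), the Python snippet below changes the patch's state via PATCH using the requests library. The token value is a hypothetical placeholder, and the writable field name ("state") and the Authorization: Token header are assumptions based on the Patchwork REST API's token authentication.

    import requests

    API_URL = "http://patchwork.ozlabs.org/api/patches/1282821/"
    TOKEN = "0123456789abcdef"  # hypothetical token; a real one comes from your Patchwork profile

    # PATCH performs a partial update: only the fields in the JSON body change.
    resp = requests.patch(
        API_URL,
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},  # e.g. move the patch out of "changes-requested"
        timeout=30,
    )
    resp.raise_for_status()
    print(resp.json()["state"])  # confirm the new state
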
GET /api/patches/1282821/?format=api
{ "id": 1282821, "url": "http://patchwork.ozlabs.org/api/patches/1282821/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200504164349.1523441-4-jeffrey.t.kirsher@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20200504164349.1523441-4-jeffrey.t.kirsher@intel.com>", "list_archive_url": null, "date": "2020-05-04T16:43:44", "name": "[net-next,v3,4/9] ice: Support resource allocation requests", "commit_ref": null, "pull_url": null, "state": "changes-requested", "archived": false, "hash": "4a4ab7848f2ec37a50735d472dcdb6d4ff21b34b", "submitter": { "id": 473, "url": "http://patchwork.ozlabs.org/api/people/473/?format=api", "name": "Kirsher, Jeffrey T", "email": "jeffrey.t.kirsher@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200504164349.1523441-4-jeffrey.t.kirsher@intel.com/mbox/", "series": [ { "id": 174549, "url": "http://patchwork.ozlabs.org/api/series/174549/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=174549", "date": "2020-05-04T16:43:44", "name": "[net-next,v3,1/9] Implementation of Virtual Bus", "version": 3, "mbox": "http://patchwork.ozlabs.org/series/174549/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1282821/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1282821/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=osuosl.org\n (client-ip=140.211.166.137; helo=fraxinus.osuosl.org;\n envelope-from=intel-wired-lan-bounces@osuosl.org; receiver=<UNKNOWN>)", "ozlabs.org;\n dmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 49G7wd4Kg1z9sSg\n\tfor <incoming@patchwork.ozlabs.org>; Tue, 5 May 2020 02:44:01 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id E3362871C8;\n\tMon, 4 May 2020 16:43:59 +0000 (UTC)", "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id SM-T8H8fwKRa; Mon, 4 May 2020 16:43:57 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id DFFF686B11;\n\tMon, 4 May 2020 16:43:57 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n by ash.osuosl.org (Postfix) with ESMTP id 83AE31BF857\n for <intel-wired-lan@lists.osuosl.org>; Mon, 4 May 2020 16:43:54 +0000 (UTC)", "from localhost (localhost 
[127.0.0.1])\n by hemlock.osuosl.org (Postfix) with ESMTP id 7A59688378\n for <intel-wired-lan@lists.osuosl.org>; Mon, 4 May 2020 16:43:54 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id RFiFEP5YZwE7 for <intel-wired-lan@lists.osuosl.org>;\n Mon, 4 May 2020 16:43:52 +0000 (UTC)", "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by hemlock.osuosl.org (Postfix) with ESMTPS id C9F2188437\n for <intel-wired-lan@lists.osuosl.org>; Mon, 4 May 2020 16:43:52 +0000 (UTC)", "from fmsmga006.fm.intel.com ([10.253.24.20])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 04 May 2020 09:43:51 -0700", "from jtkirshe-desk1.jf.intel.com ([134.134.177.86])\n by fmsmga006.fm.intel.com with ESMTP; 04 May 2020 09:43:51 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "IronPort-SDR": [ "\n CMCl8l/bYPSa2suOQQZYwHBwKH6yW4pKIUhS0FdUPTY5RmA7pAav/yTBnidhAJfRS5/+UGjwNq\n +GgU+wwVKk+w==", "\n KAsi7sAoJEtavl3zOEHpwePlBXZ8rwo2/skTHjYskEXj+JR+IN11MD7f4+5wzr2JDgQZ7+Jxup\n 2MiIDjjs9Upw==" ], "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.73,352,1583222400\"; d=\"scan'208\";a=\"461073920\"", "From": "Jeff Kirsher <jeffrey.t.kirsher@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Mon, 4 May 2020 09:43:44 -0700", "Message-Id": "<20200504164349.1523441-4-jeffrey.t.kirsher@intel.com>", "X-Mailer": "git-send-email 2.26.2", "In-Reply-To": "<20200504164349.1523441-1-jeffrey.t.kirsher@intel.com>", "References": "<20200504164349.1523441-1-jeffrey.t.kirsher@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [net-next v3 4/9] ice: Support resource\n allocation requests", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n <intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Dave Ertman <david.m.ertman@intel.com>\n\nEnable the peer device to request queue sets from the PF.\n\nSigned-off-by: Dave Ertman <david.m.ertman@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\nTested-by: Andrew Bowers <andrewx.bowers@intel.com>\nSigned-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice.h | 1 +\n .../net/ethernet/intel/ice/ice_adminq_cmd.h | 32 +++\n drivers/net/ethernet/intel/ice/ice_common.c | 188 ++++++++++++++\n drivers/net/ethernet/intel/ice/ice_common.h | 9 +\n drivers/net/ethernet/intel/ice/ice_idc.c | 244 ++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_sched.c | 69 ++++-\n drivers/net/ethernet/intel/ice/ice_switch.c | 4 +\n 
drivers/net/ethernet/intel/ice/ice_switch.h | 2 +\n drivers/net/ethernet/intel/ice/ice_type.h | 3 +\n 9 files changed, 547 insertions(+), 5 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex 73366009ef03..6ad1894eca3f 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -296,6 +296,7 @@ struct ice_vsi {\n \tu16 req_rxq;\t\t\t /* User requested Rx queues */\n \tu16 num_rx_desc;\n \tu16 num_tx_desc;\n+\tu16 qset_handle[ICE_MAX_TRAFFIC_CLASS];\n \tstruct ice_tc_cfg tc_cfg;\n \tstruct bpf_prog *xdp_prog;\n \tstruct ice_ring **xdp_rings;\t /* XDP ring array */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex 51baab0621a2..a1066c4bf40d 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -1536,6 +1536,36 @@ struct ice_aqc_dis_txq {\n \tstruct ice_aqc_dis_txq_item qgrps[1];\n };\n \n+/* Add Tx RDMA Queue Set (indirect 0x0C33) */\n+struct ice_aqc_add_rdma_qset {\n+\tu8 num_qset_grps;\n+\tu8 reserved[7];\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* This is the descriptor of each qset entry for the Add Tx RDMA Queue Set\n+ * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset.\n+ */\n+struct ice_aqc_add_tx_rdma_qset_entry {\n+\t__le16 tx_qset_id;\n+\tu8 rsvd[2];\n+\t__le32 qset_teid;\n+\tstruct ice_aqc_txsched_elem info;\n+};\n+\n+/* The format of the command buffer for Add Tx RDMA Queue Set(0x0C33)\n+ * is an array of the following structs. Please note that the length of\n+ * each struct ice_aqc_add_rdma_qset is variable due to the variable\n+ * number of queues in each group!\n+ */\n+struct ice_aqc_add_rdma_qset_data {\n+\t__le32 parent_teid;\n+\t__le16 num_qsets;\n+\tu8 rsvd[2];\n+\tstruct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[1];\n+};\n+\n /* Configure Firmware Logging Command (indirect 0xFF09)\n * Logging Information Read Response (indirect 0xFF10)\n * Note: The 0xFF10 command has no input parameters.\n@@ -1732,6 +1762,7 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_get_set_rss_key get_set_rss_key;\n \t\tstruct ice_aqc_add_txqs add_txqs;\n \t\tstruct ice_aqc_dis_txqs dis_txqs;\n+\t\tstruct ice_aqc_add_rdma_qset add_rdma_qset;\n \t\tstruct ice_aqc_add_get_update_free_vsi vsi_cmd;\n \t\tstruct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;\n \t\tstruct ice_aqc_fw_logging fw_logging;\n@@ -1867,6 +1898,7 @@ enum ice_adminq_opc {\n \t/* Tx queue handling commands/events */\n \tice_aqc_opc_add_txqs\t\t\t\t= 0x0C30,\n \tice_aqc_opc_dis_txqs\t\t\t\t= 0x0C31,\n+\tice_aqc_opc_add_rdma_qset\t\t\t= 0x0C33,\n \n \t/* package commands */\n \tice_aqc_opc_download_pkg\t\t\t= 0x0C40,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex 2dca49aed5bb..c760fae4aed4 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -2917,6 +2917,59 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,\n \treturn status;\n }\n \n+/**\n+ * ice_aq_add_rdma_qsets\n+ * @hw: pointer to the hardware structure\n+ * @num_qset_grps: Number of RDMA Qset groups\n+ * @qset_list: list of qset groups to be added\n+ * @buf_size: size of buffer for indirect command\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Add Tx RDMA Qsets (0x0C33)\n+ */\n+static enum ice_status\n+ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 
num_qset_grps,\n+\t\t struct ice_aqc_add_rdma_qset_data *qset_list,\n+\t\t u16 buf_size, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_add_rdma_qset_data *list;\n+\tu16 i, sum_header_size, sum_q_size = 0;\n+\tstruct ice_aqc_add_rdma_qset *cmd;\n+\tstruct ice_aq_desc desc;\n+\n+\tcmd = &desc.params.add_rdma_qset;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);\n+\n+\tif (!qset_list)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tif (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tsum_header_size = num_qset_grps *\n+\t\t(sizeof(*qset_list) - sizeof(*qset_list->rdma_qsets));\n+\n+\tlist = qset_list;\n+\tfor (i = 0; i < num_qset_grps; i++) {\n+\t\tstruct ice_aqc_add_tx_rdma_qset_entry *qset = list->rdma_qsets;\n+\t\tu16 num_qsets = le16_to_cpu(list->num_qsets);\n+\n+\t\tsum_q_size += num_qsets * sizeof(*qset);\n+\t\tlist = (struct ice_aqc_add_rdma_qset_data *)\n+\t\t\t(qset + num_qsets);\n+\t}\n+\n+\tif (buf_size != (sum_header_size + sum_q_size))\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\n+\tcmd->num_qset_grps = num_qset_grps;\n+\n+\treturn ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);\n+}\n+\n /* End of FW Admin Queue command wrappers */\n \n /**\n@@ -3388,6 +3441,141 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,\n \t\t\t ICE_SCHED_NODE_OWNER_LAN);\n }\n \n+/**\n+ * ice_cfg_vsi_rdma - configure the VSI RDMA queues\n+ * @pi: port information structure\n+ * @vsi_handle: software VSI handle\n+ * @tc_bitmap: TC bitmap\n+ * @max_rdmaqs: max RDMA queues array per TC\n+ *\n+ * This function adds/updates the VSI RDMA queues per TC.\n+ */\n+enum ice_status\n+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,\n+\t\t u16 *max_rdmaqs)\n+{\n+\treturn ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,\n+\t\t\t ICE_SCHED_NODE_OWNER_RDMA);\n+}\n+\n+/**\n+ * ice_ena_vsi_rdma_qset\n+ * @pi: port information structure\n+ * @vsi_handle: software VSI handle\n+ * @tc: TC number\n+ * @rdma_qset: pointer to RDMA qset\n+ * @num_qsets: number of RDMA qsets\n+ * @qset_teid: pointer to qset node teids\n+ *\n+ * This function adds RDMA qset\n+ */\n+enum ice_status\n+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)\n+{\n+\tstruct ice_aqc_txsched_elem_data node = { 0 };\n+\tstruct ice_aqc_add_rdma_qset_data *buf;\n+\tstruct ice_sched_node *parent;\n+\tenum ice_status status;\n+\tstruct ice_hw *hw;\n+\tu16 i, buf_size;\n+\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn ICE_ERR_CFG;\n+\thw = pi->hw;\n+\n+\tif (!ice_is_vsi_valid(hw, vsi_handle))\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tbuf_size = struct_size(buf, rdma_qsets, num_qsets - 1);\n+\tbuf = kzalloc(buf_size, GFP_KERNEL);\n+\tif (!buf)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\tmutex_lock(&pi->sched_lock);\n+\n+\tparent = ice_sched_get_free_qparent(pi, vsi_handle, tc,\n+\t\t\t\t\t ICE_SCHED_NODE_OWNER_RDMA);\n+\tif (!parent) {\n+\t\tstatus = ICE_ERR_PARAM;\n+\t\tgoto rdma_error_exit;\n+\t}\n+\tbuf->parent_teid = parent->info.node_teid;\n+\tnode.parent_teid = parent->info.node_teid;\n+\n+\tbuf->num_qsets = cpu_to_le16(num_qsets);\n+\tfor (i = 0; i < num_qsets; i++) {\n+\t\tbuf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);\n+\t\tbuf->rdma_qsets[i].info.valid_sections =\n+\t\t\t\t\t\tICE_AQC_ELEM_VALID_GENERIC;\n+\t}\n+\tstatus = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);\n+\tif (status) {\n+\t\tice_debug(hw, ICE_DBG_RDMA, \"add 
RDMA qset failed\\n\");\n+\t\tgoto rdma_error_exit;\n+\t}\n+\tnode.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;\n+\tfor (i = 0; i < num_qsets; i++) {\n+\t\tnode.node_teid = buf->rdma_qsets[i].qset_teid;\n+\t\tstatus = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,\n+\t\t\t\t\t &node);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\tqset_teid[i] = le32_to_cpu(node.node_teid);\n+\t}\n+rdma_error_exit:\n+\tmutex_unlock(&pi->sched_lock);\n+\tkfree(buf);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_dis_vsi_rdma_qset - free RDMA resources\n+ * @pi: port_info struct\n+ * @count: number of RDMA qsets to free\n+ * @qset_teid: TEID of qset node\n+ * @q_id: list of queue IDs being disabled\n+ */\n+enum ice_status\n+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,\n+\t\t u16 *q_id)\n+{\n+\tstruct ice_aqc_dis_txq_item qg_list;\n+\tenum ice_status status = 0;\n+\tu16 qg_size;\n+\tint i;\n+\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn ICE_ERR_CFG;\n+\n+\tqg_size = sizeof(qg_list);\n+\n+\tmutex_lock(&pi->sched_lock);\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tstruct ice_sched_node *node;\n+\n+\t\tnode = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);\n+\t\tif (!node)\n+\t\t\tcontinue;\n+\n+\t\tqg_list.parent_teid = node->info.parent_teid;\n+\t\tqg_list.num_qs = 1;\n+\t\tqg_list.q_id[0] =\n+\t\t\tcpu_to_le16(q_id[i] |\n+\t\t\t\t ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);\n+\n+\t\tstatus = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, qg_size,\n+\t\t\t\t\t ICE_NO_RESET, 0, NULL);\n+\t\tif (status)\n+\t\t\tbreak;\n+\n+\t\tice_free_sched_node(pi, node);\n+\t}\n+\n+\tmutex_unlock(&pi->sched_lock);\n+\treturn status;\n+}\n+\n /**\n * ice_replay_pre_init - replay pre initialization\n * @hw: pointer to the HW struct\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex 8104f3d64d96..db63fd6b5608 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -125,6 +125,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,\n \t\t bool write, struct ice_sq_cd *cd);\n \n enum ice_status\n+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,\n+\t\t u16 *max_rdmaqs);\n+enum ice_status\n+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);\n+enum ice_status\n+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,\n+\t\t u16 *q_id);\n+enum ice_status\n ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,\n \t\tu16 *q_handle, u16 *q_ids, u32 *q_teids,\n \t\tenum ice_disq_rst_src rst_src, u16 vmvf_num,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c\nindex 499c1b77dfc9..05fa5c61e2d3 100644\n--- a/drivers/net/ethernet/intel/ice/ice_idc.c\n+++ b/drivers/net/ethernet/intel/ice/ice_idc.c\n@@ -388,6 +388,248 @@ ice_unroll_peer(struct ice_peer_dev_int *peer_dev_int,\n \treturn 0;\n }\n \n+/**\n+ * ice_find_vsi - Find the VSI from VSI ID\n+ * @pf: The PF pointer to search in\n+ * @vsi_num: The VSI ID to search for\n+ */\n+static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)\n+{\n+\tint i;\n+\n+\tice_for_each_vsi(pf, i)\n+\t\tif (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)\n+\t\t\treturn pf->vsi[i];\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_peer_alloc_rdma_qsets - Allocate Leaf Nodes for RDMA Qset\n+ * @peer_dev: peer that is requesting the Leaf Nodes\n+ * @res: Resources 
to be allocated\n+ * @partial_acceptable: If partial allocation is acceptable to the peer\n+ *\n+ * This function allocates Leaf Nodes for given RDMA Qset resources\n+ * for the peer device.\n+ */\n+static int\n+ice_peer_alloc_rdma_qsets(struct iidc_peer_dev *peer_dev, struct iidc_res *res,\n+\t\t\t int __always_unused partial_acceptable)\n+{\n+\tu16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];\n+\tenum ice_status status;\n+\tstruct ice_vsi *vsi;\n+\tstruct device *dev;\n+\tstruct ice_pf *pf;\n+\tint i, ret = 0;\n+\tu32 *qset_teid;\n+\tu16 *qs_handle;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tdev = ice_pf_to_dev(pf);\n+\n+\tif (res->cnt_req > ICE_MAX_TXQ_PER_TXQG)\n+\t\treturn -EINVAL;\n+\n+\tqset_teid = kcalloc(res->cnt_req, sizeof(*qset_teid), GFP_KERNEL);\n+\tif (!qset_teid)\n+\t\treturn -ENOMEM;\n+\n+\tqs_handle = kcalloc(res->cnt_req, sizeof(*qs_handle), GFP_KERNEL);\n+\tif (!qs_handle) {\n+\t\tkfree(qset_teid);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tice_for_each_traffic_class(i)\n+\t\tmax_rdmaqs[i] = 0;\n+\n+\tfor (i = 0; i < res->cnt_req; i++) {\n+\t\tstruct iidc_rdma_qset_params *qset;\n+\n+\t\tqset = &res->res[i].res.qsets;\n+\t\tif (qset->vsi_id != peer_dev->pf_vsi_num) {\n+\t\t\tdev_err(dev, \"RDMA QSet invalid VSI requested\\n\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto out;\n+\t\t}\n+\t\tmax_rdmaqs[qset->tc]++;\n+\t\tqs_handle[i] = qset->qs_handle;\n+\t}\n+\n+\tvsi = ice_find_vsi(pf, peer_dev->pf_vsi_num);\n+\tif (!vsi) {\n+\t\tdev_err(dev, \"RDMA QSet invalid VSI\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n+\t}\n+\n+\tstatus = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,\n+\t\t\t\t max_rdmaqs);\n+\tif (status) {\n+\t\tdev_err(dev, \"Failed VSI RDMA qset config\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n+\t}\n+\n+\tfor (i = 0; i < res->cnt_req; i++) {\n+\t\tstruct iidc_rdma_qset_params *qset;\n+\n+\t\tqset = &res->res[i].res.qsets;\n+\t\tstatus = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx,\n+\t\t\t\t\t qset->tc, &qs_handle[i], 1,\n+\t\t\t\t\t &qset_teid[i]);\n+\t\tif (status) {\n+\t\t\tdev_err(dev, \"Failed VSI RDMA qset enable\\n\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto out;\n+\t\t}\n+\t\tvsi->qset_handle[qset->tc] = qset->qs_handle;\n+\t\tqset->teid = qset_teid[i];\n+\t}\n+\n+out:\n+\tkfree(qset_teid);\n+\tkfree(qs_handle);\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_free_rdma_qsets - Free leaf nodes for RDMA Qset\n+ * @peer_dev: peer that requested qsets to be freed\n+ * @res: Resource to be freed\n+ */\n+static int\n+ice_peer_free_rdma_qsets(struct iidc_peer_dev *peer_dev, struct iidc_res *res)\n+{\n+\tenum ice_status status;\n+\tint count, i, ret = 0;\n+\tstruct ice_vsi *vsi;\n+\tstruct device *dev;\n+\tstruct ice_pf *pf;\n+\tu16 vsi_id;\n+\tu32 *teid;\n+\tu16 *q_id;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tdev = ice_pf_to_dev(pf);\n+\n+\tcount = res->res_allocated;\n+\tif (count > ICE_MAX_TXQ_PER_TXQG)\n+\t\treturn -EINVAL;\n+\n+\tteid = kcalloc(count, sizeof(*teid), GFP_KERNEL);\n+\tif (!teid)\n+\t\treturn -ENOMEM;\n+\n+\tq_id = kcalloc(count, sizeof(*q_id), GFP_KERNEL);\n+\tif (!q_id) {\n+\t\tkfree(teid);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tvsi_id = res->res[0].res.qsets.vsi_id;\n+\tvsi = ice_find_vsi(pf, vsi_id);\n+\tif (!vsi) {\n+\t\tdev_err(dev, \"RDMA Invalid VSI\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto rdma_free_out;\n+\t}\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tstruct iidc_rdma_qset_params 
*qset;\n+\n+\t\tqset = &res->res[i].res.qsets;\n+\t\tif (qset->vsi_id != vsi_id) {\n+\t\t\tdev_err(dev, \"RDMA Invalid VSI ID\\n\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto rdma_free_out;\n+\t\t}\n+\t\tq_id[i] = qset->qs_handle;\n+\t\tteid[i] = qset->teid;\n+\n+\t\tvsi->qset_handle[qset->tc] = 0;\n+\t}\n+\n+\tstatus = ice_dis_vsi_rdma_qset(vsi->port_info, count, teid, q_id);\n+\tif (status)\n+\t\tret = -EINVAL;\n+\n+rdma_free_out:\n+\tkfree(teid);\n+\tkfree(q_id);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_alloc_res - Allocate requested resources for peer device\n+ * @peer_dev: peer that is requesting resources\n+ * @res: Resources to be allocated\n+ * @partial_acceptable: If partial allocation is acceptable to the peer\n+ *\n+ * This function allocates requested resources for the peer device.\n+ */\n+static int\n+ice_peer_alloc_res(struct iidc_peer_dev *peer_dev, struct iidc_res *res,\n+\t\t int partial_acceptable)\n+{\n+\tstruct ice_pf *pf;\n+\tint ret;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tpf = pci_get_drvdata(peer_dev->pdev);\n+\tif (!ice_pf_state_is_nominal(pf))\n+\t\treturn -EBUSY;\n+\n+\tswitch (res->res_type) {\n+\tcase IIDC_RDMA_QSETS_TXSCHED:\n+\t\tret = ice_peer_alloc_rdma_qsets(peer_dev, res,\n+\t\t\t\t\t\tpartial_acceptable);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_peer_free_res - Free given resources\n+ * @peer_dev: peer that is requesting freeing of resources\n+ * @res: Resources to be freed\n+ *\n+ * Free/Release resources allocated to given peer device.\n+ */\n+static int\n+ice_peer_free_res(struct iidc_peer_dev *peer_dev, struct iidc_res *res)\n+{\n+\tint ret;\n+\n+\tif (!ice_validate_peer_dev(peer_dev) || !res)\n+\t\treturn -EINVAL;\n+\n+\tswitch (res->res_type) {\n+\tcase IIDC_RDMA_QSETS_TXSCHED:\n+\t\tret = ice_peer_free_rdma_qsets(peer_dev, res);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n /**\n * ice_peer_unregister - request to unregister peer\n * @peer_dev: peer device\n@@ -511,6 +753,8 @@ ice_peer_update_vsi_filter(struct iidc_peer_dev *peer_dev,\n \n /* Initialize the ice_ops struct, which is used in 'ice_init_peer_devices' */\n static const struct iidc_ops ops = {\n+\t.alloc_res\t\t\t= ice_peer_alloc_res,\n+\t.free_res\t\t\t= ice_peer_free_res,\n \t.peer_register\t\t\t= ice_peer_register,\n \t.peer_unregister\t\t= ice_peer_unregister,\n \t.update_vsi_filter\t\t= ice_peer_update_vsi_filter,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c\nindex eae707ddf8e8..2f618d051b56 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.c\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.c\n@@ -577,6 +577,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)\n \treturn 0;\n }\n \n+/**\n+ * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC\n+ * @hw: pointer to the HW struct\n+ * @vsi_handle: VSI handle\n+ * @tc: TC number\n+ * @new_numqs: number of queues\n+ */\n+static enum ice_status\n+ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)\n+{\n+\tstruct ice_vsi_ctx *vsi_ctx;\n+\tstruct ice_q_ctx *q_ctx;\n+\n+\tvsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);\n+\tif (!vsi_ctx)\n+\t\treturn ICE_ERR_PARAM;\n+\t/* allocate RDMA queue contexts */\n+\tif (!vsi_ctx->rdma_q_ctx[tc]) {\n+\t\tvsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),\n+\t\t\t\t\t\t new_numqs,\n+\t\t\t\t\t\t 
sizeof(*q_ctx),\n+\t\t\t\t\t\t GFP_KERNEL);\n+\t\tif (!vsi_ctx->rdma_q_ctx[tc])\n+\t\t\treturn ICE_ERR_NO_MEMORY;\n+\t\tvsi_ctx->num_rdma_q_entries[tc] = new_numqs;\n+\t\treturn 0;\n+\t}\n+\t/* num queues are increased, update the queue contexts */\n+\tif (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {\n+\t\tu16 prev_num = vsi_ctx->num_rdma_q_entries[tc];\n+\n+\t\tq_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,\n+\t\t\t\t sizeof(*q_ctx), GFP_KERNEL);\n+\t\tif (!q_ctx)\n+\t\t\treturn ICE_ERR_NO_MEMORY;\n+\t\tmemcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],\n+\t\t prev_num * sizeof(*q_ctx));\n+\t\tdevm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);\n+\t\tvsi_ctx->rdma_q_ctx[tc] = q_ctx;\n+\t\tvsi_ctx->num_rdma_q_entries[tc] = new_numqs;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n * ice_aq_rl_profile - performs a rate limiting task\n * @hw: pointer to the HW struct\n@@ -1599,13 +1643,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,\n \tif (!vsi_ctx)\n \t\treturn ICE_ERR_PARAM;\n \n-\tprev_numqs = vsi_ctx->sched.max_lanq[tc];\n+\tif (owner == ICE_SCHED_NODE_OWNER_LAN)\n+\t\tprev_numqs = vsi_ctx->sched.max_lanq[tc];\n+\telse\n+\t\tprev_numqs = vsi_ctx->sched.max_rdmaq[tc];\n \t/* num queues are not changed or less than the previous number */\n \tif (new_numqs <= prev_numqs)\n \t\treturn status;\n-\tstatus = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);\n-\tif (status)\n-\t\treturn status;\n+\tif (owner == ICE_SCHED_NODE_OWNER_LAN) {\n+\t\tstatus = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t} else {\n+\t\tstatus = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n \n \tif (new_numqs)\n \t\tice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);\n@@ -1620,7 +1673,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,\n \t\t\t\t\t new_num_nodes, owner);\n \tif (status)\n \t\treturn status;\n-\tvsi_ctx->sched.max_lanq[tc] = new_numqs;\n+\tif (owner == ICE_SCHED_NODE_OWNER_LAN)\n+\t\tvsi_ctx->sched.max_lanq[tc] = new_numqs;\n+\telse\n+\t\tvsi_ctx->sched.max_rdmaq[tc] = new_numqs;\n \n \treturn 0;\n }\n@@ -1686,6 +1742,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,\n \t\t * recreate the child nodes all the time in these cases.\n \t\t */\n \t\tvsi_ctx->sched.max_lanq[tc] = 0;\n+\t\tvsi_ctx->sched.max_rdmaq[tc] = 0;\n \t}\n \n \t/* update the VSI child nodes */\n@@ -1817,6 +1874,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)\n \t\t}\n \t\tif (owner == ICE_SCHED_NODE_OWNER_LAN)\n \t\t\tvsi_ctx->sched.max_lanq[i] = 0;\n+\t\telse\n+\t\t\tvsi_ctx->sched.max_rdmaq[i] = 0;\n \t}\n \tstatus = 0;\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c\nindex cf8e1553599a..eeb1b0e6f716 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.c\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.c\n@@ -310,6 +310,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)\n \t\t\tdevm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);\n \t\t\tvsi->lan_q_ctx[i] = NULL;\n \t\t}\n+\t\tif (vsi->rdma_q_ctx[i]) {\n+\t\t\tdevm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);\n+\t\t\tvsi->rdma_q_ctx[i] = NULL;\n+\t\t}\n \t}\n }\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h\nindex 96010d3d96fd..acd2f150c30b 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.h\n+++ 
b/drivers/net/ethernet/intel/ice/ice_switch.h\n@@ -26,6 +26,8 @@ struct ice_vsi_ctx {\n \tu8 vf_num;\n \tu16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];\n \tstruct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];\n+\tu16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];\n+\tstruct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];\n };\n \n enum ice_sw_fwd_act_type {\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 42b2d700bc1f..3ada92536540 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -45,6 +45,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)\n #define ICE_DBG_FLOW\t\tBIT_ULL(9)\n #define ICE_DBG_SW\t\tBIT_ULL(13)\n #define ICE_DBG_SCHED\t\tBIT_ULL(14)\n+#define ICE_DBG_RDMA\t\tBIT_ULL(15)\n #define ICE_DBG_PKG\t\tBIT_ULL(16)\n #define ICE_DBG_RES\t\tBIT_ULL(17)\n #define ICE_DBG_AQ_MSG\t\tBIT_ULL(24)\n@@ -282,6 +283,7 @@ struct ice_sched_node {\n \tu8 tc_num;\n \tu8 owner;\n #define ICE_SCHED_NODE_OWNER_LAN\t0\n+#define ICE_SCHED_NODE_OWNER_RDMA\t2\n };\n \n /* Access Macros for Tx Sched Elements data */\n@@ -353,6 +355,7 @@ struct ice_sched_vsi_info {\n \tstruct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];\n \tstruct list_head list_entry;\n \tu16 max_lanq[ICE_MAX_TRAFFIC_CLASS];\n+\tu16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];\n };\n \n /* driver defines the policy */\n", "prefixes": [ "net-next", "v3", "4/9" ] }
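
A consumer of the response above typically reads a few metadata fields and then downloads the linked mbox for local application with git am. The sketch below is an illustration rather than official tooling; it uses only fields present in the JSON shown.

    import requests

    url = "http://patchwork.ozlabs.org/api/patches/1282821/"
    patch = requests.get(url, timeout=30).json()

    print(patch["name"])                # "[net-next,v3,4/9] ice: Support resource allocation requests"
    print(patch["state"])               # "changes-requested"
    print(patch["submitter"]["email"])  # "jeffrey.t.kirsher@intel.com"

    # The "mbox" field points at an mbox rendering of the patch,
    # suitable for `git am` after saving it locally.
    mbox = requests.get(patch["mbox"], timeout=30)
    with open("patch-1282821.mbox", "wb") as f:
        f.write(mbox.content)

The "comments" and "checks" URLs in the response can be fetched the same way to inspect the review discussion and CI results for this patch.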