Patch Detail
get: Show a patch.
patch: Update a patch.
put: Update a patch.
GET /api/patches/886527/?format=api
{ "id": 886527, "url": "http://patchwork.ozlabs.org/api/patches/886527/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180315234802.31336-5-anirudh.venkataramanan@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20180315234802.31336-5-anirudh.venkataramanan@intel.com>", "list_archive_url": null, "date": "2018-03-15T23:47:51", "name": "[v2,04/15] ice: Get switch config, scheduler config and device capabilities", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "18f3c0bf26df5c12b8ad23af479ef0b888d7ec84", "submitter": { "id": 73601, "url": "http://patchwork.ozlabs.org/api/people/73601/?format=api", "name": "Anirudh Venkataramanan", "email": "anirudh.venkataramanan@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180315234802.31336-5-anirudh.venkataramanan@intel.com/mbox/", "series": [ { "id": 34096, "url": "http://patchwork.ozlabs.org/api/series/34096/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=34096", "date": "2018-03-15T23:47:47", "name": "Add ice driver", "version": 2, "mbox": "http://patchwork.ozlabs.org/series/34096/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/886527/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/886527/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=none (p=none dis=none) header.from=intel.com" ], "Received": [ "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 402QJg20lcz9sVp\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 16 Mar 2018 10:48:18 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 8C2098A258;\n\tThu, 15 Mar 2018 23:48:17 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id EP7SD2q6Zfj8; Thu, 15 Mar 2018 23:48:10 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id ADE478A273;\n\tThu, 15 Mar 2018 23:48:09 +0000 (UTC)", "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id 5F99C1C0359\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:08 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby 
whitealder.osuosl.org (Postfix) with ESMTP id 39E5788AE9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:08 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id PubNjZnMVqPs for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:04 +0000 (UTC)", "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 8090088A17\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:04 +0000 (UTC)", "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga105.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t15 Mar 2018 16:48:03 -0700", "from shasta.jf.intel.com ([10.166.241.32])\n\tby fmsmga004.fm.intel.com with ESMTP; 15 Mar 2018 16:48:03 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.48,313,1517904000\"; d=\"scan'208\";a=\"37836783\"", "From": "Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Thu, 15 Mar 2018 16:47:51 -0700", "Message-Id": "<20180315234802.31336-5-anirudh.venkataramanan@intel.com>", "X-Mailer": "git-send-email 2.14.3", "In-Reply-To": "<20180315234802.31336-1-anirudh.venkataramanan@intel.com>", "References": "<20180315234802.31336-1-anirudh.venkataramanan@intel.com>", "Subject": "[Intel-wired-lan] [PATCH v2 04/15] ice: Get switch config,\n\tscheduler config and device capabilities", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.24", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Cc": "netdev@vger.kernel.org", "MIME-Version": "1.0", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "This patch adds to the initialization flow by getting switch\nconfiguration, scheduler configuration and device capabilities.\n\nSwitch configuration:\nOn boot, an L2 switch element is created in the firmware per physical\nfunction. Each physical function is also mapped to a port, to which its\nswitch element is connected. In other words, this switch can be visualized\nas an embedded vSwitch that can connect a physical functions's virtual\nstation interfaces (VSIs) to the egress/ingress port. 
Egress/ingress\nfilters will be eventually created and applied on this switch element.\nAs part of the initialization flow, the driver gets configuration data\nfrom this switch element and stores it.\n\nScheduler configuration:\nThe Tx scheduler is a subsystem responsible for setting and enforcing QoS.\nAs part of the initialization flow, the driver queries and stores the\ndefault scheduler configuration for the given physical function.\n\nDevice capabilities:\nAs part of initialization, the driver has to determine what the device is\ncapable of (ex. max queues, VSIs, etc). This information is obtained from\nthe firmware and stored by the driver.\n\nCC: Shannon Nelson <shannon.nelson@oracle.com>\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n---\nv2: Addressed Shannon Nelson's review comment by changing retry count value\n to 2.\n---\n drivers/net/ethernet/intel/ice/Makefile | 4 +-\n drivers/net/ethernet/intel/ice/ice.h | 2 +\n drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 209 ++++++++++++++\n drivers/net/ethernet/intel/ice/ice_common.c | 231 ++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_common.h | 2 +\n drivers/net/ethernet/intel/ice/ice_sched.c | 354 ++++++++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_sched.h | 42 +++\n drivers/net/ethernet/intel/ice/ice_switch.c | 158 +++++++++++\n drivers/net/ethernet/intel/ice/ice_switch.h | 28 ++\n drivers/net/ethernet/intel/ice/ice_type.h | 109 ++++++++\n 10 files changed, 1138 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/ethernet/intel/ice/ice_sched.c\n create mode 100644 drivers/net/ethernet/intel/ice/ice_sched.h\n create mode 100644 drivers/net/ethernet/intel/ice/ice_switch.c\n create mode 100644 drivers/net/ethernet/intel/ice/ice_switch.h", "diff": "diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile\nindex 373d481dbb25..809d85c04398 100644\n--- a/drivers/net/ethernet/intel/ice/Makefile\n+++ b/drivers/net/ethernet/intel/ice/Makefile\n@@ -27,4 +27,6 @@ obj-$(CONFIG_ICE) += ice.o\n ice-y := ice_main.o\t\\\n \t ice_controlq.o\t\\\n \t ice_common.o\t\\\n-\t ice_nvm.o\n+\t ice_nvm.o\t\\\n+\t ice_switch.o\t\\\n+\t ice_sched.o\ndiff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex ab2800c31906..f6e3339591bb 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -30,7 +30,9 @@\n #include <linux/bitmap.h>\n #include \"ice_devids.h\"\n #include \"ice_type.h\"\n+#include \"ice_switch.h\"\n #include \"ice_common.h\"\n+#include \"ice_sched.h\"\n \n #define ICE_BAR0\t\t0\n #define ICE_AQ_LEN\t\t64\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex 05b22a1ffd70..66a3f41df673 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -22,6 +22,8 @@\n * descriptor format. 
It is shared between Firmware and Software.\n */\n \n+#define ICE_AQC_TOPO_MAX_LEVEL_NUM\t0x9\n+\n struct ice_aqc_generic {\n \t__le32 param0;\n \t__le32 param1;\n@@ -82,6 +84,40 @@ struct ice_aqc_req_res {\n \tu8 reserved[2];\n };\n \n+/* Get function capabilities (indirect 0x000A)\n+ * Get device capabilities (indirect 0x000B)\n+ */\n+struct ice_aqc_list_caps {\n+\tu8 cmd_flags;\n+\tu8 pf_index;\n+\tu8 reserved[2];\n+\t__le32 count;\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* Device/Function buffer entry, repeated per reported capability */\n+struct ice_aqc_list_caps_elem {\n+\t__le16 cap;\n+#define ICE_AQC_CAPS_VSI\t\t\t\t0x0017\n+#define ICE_AQC_CAPS_RSS\t\t\t\t0x0040\n+#define ICE_AQC_CAPS_RXQS\t\t\t\t0x0041\n+#define ICE_AQC_CAPS_TXQS\t\t\t\t0x0042\n+#define ICE_AQC_CAPS_MSIX\t\t\t\t0x0043\n+#define ICE_AQC_CAPS_MAX_MTU\t\t\t\t0x0047\n+\n+\tu8 major_ver;\n+\tu8 minor_ver;\n+\t/* Number of resources described by this capability */\n+\t__le32 number;\n+\t/* Only meaningful for some types of resources */\n+\t__le32 logical_id;\n+\t/* Only meaningful for some types of resources */\n+\t__le32 phys_id;\n+\t__le64 rsvd1;\n+\t__le64 rsvd2;\n+};\n+\n /* Clear PXE Command and response (direct 0x0110) */\n struct ice_aqc_clear_pxe {\n \tu8 rx_cnt;\n@@ -89,6 +125,161 @@ struct ice_aqc_clear_pxe {\n \tu8 reserved[15];\n };\n \n+/* Get switch configuration (0x0200) */\n+struct ice_aqc_get_sw_cfg {\n+\t/* Reserved for command and copy of request flags for response */\n+\t__le16 flags;\n+\t/* First desc in case of command and next_elem in case of response\n+\t * In case of response, if it is not zero, means all the configuration\n+\t * was not returned and new command shall be sent with this value in\n+\t * the 'first desc' field\n+\t */\n+\t__le16 element;\n+\t/* Reserved for command, only used for response */\n+\t__le16 num_elems;\n+\t__le16 rsvd;\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* Each entry in the response buffer is of the following type: */\n+struct ice_aqc_get_sw_cfg_resp_elem {\n+\t/* VSI/Port Number */\n+\t__le16 vsi_port_num;\n+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S\t0\n+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M\t\\\n+\t\t\t(0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S)\n+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S\t14\n+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M\t(0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S)\n+#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT\t0\n+#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT\t1\n+#define ICE_AQC_GET_SW_CONF_RESP_VSI\t\t2\n+\n+\t/* SWID VSI/Port belongs to */\n+\t__le16 swid;\n+\n+\t/* Bit 14..0 : PF/VF number VSI belongs to\n+\t * Bit 15 : VF indication bit\n+\t */\n+\t__le16 pf_vf_num;\n+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S\t0\n+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M\t\\\n+\t\t\t\t(0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S)\n+#define ICE_AQC_GET_SW_CONF_RESP_IS_VF\t\tBIT(15)\n+};\n+\n+/* The response buffer is as follows. 
Note that the length of the\n+ * elements array varies with the length of the command response.\n+ */\n+struct ice_aqc_get_sw_cfg_resp {\n+\tstruct ice_aqc_get_sw_cfg_resp_elem elements[1];\n+};\n+\n+/* Add TSE (indirect 0x0401)\n+ * Delete TSE (indirect 0x040F)\n+ * Move TSE (indirect 0x0408)\n+ */\n+struct ice_aqc_add_move_delete_elem {\n+\t__le16 num_grps_req;\n+\t__le16 num_grps_updated;\n+\t__le32 reserved;\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+struct ice_aqc_elem_info_bw {\n+\t__le16 bw_profile_idx;\n+\t__le16 bw_alloc;\n+};\n+\n+struct ice_aqc_txsched_elem {\n+\tu8 elem_type; /* Special field, reserved for some aq calls */\n+#define ICE_AQC_ELEM_TYPE_UNDEFINED\t\t0x0\n+#define ICE_AQC_ELEM_TYPE_ROOT_PORT\t\t0x1\n+#define ICE_AQC_ELEM_TYPE_TC\t\t\t0x2\n+#define ICE_AQC_ELEM_TYPE_SE_GENERIC\t\t0x3\n+#define ICE_AQC_ELEM_TYPE_ENTRY_POINT\t\t0x4\n+#define ICE_AQC_ELEM_TYPE_LEAF\t\t\t0x5\n+#define ICE_AQC_ELEM_TYPE_SE_PADDED\t\t0x6\n+\tu8 valid_sections;\n+#define ICE_AQC_ELEM_VALID_GENERIC\t\tBIT(0)\n+#define ICE_AQC_ELEM_VALID_CIR\t\t\tBIT(1)\n+#define ICE_AQC_ELEM_VALID_EIR\t\t\tBIT(2)\n+#define ICE_AQC_ELEM_VALID_SHARED\t\tBIT(3)\n+\tu8 generic;\n+#define ICE_AQC_ELEM_GENERIC_MODE_M\t\t0x1\n+#define ICE_AQC_ELEM_GENERIC_PRIO_S\t\t0x1\n+#define ICE_AQC_ELEM_GENERIC_PRIO_M\t(0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)\n+#define ICE_AQC_ELEM_GENERIC_SP_S\t\t0x4\n+#define ICE_AQC_ELEM_GENERIC_SP_M\t(0x1 << ICE_AQC_ELEM_GENERIC_SP_S)\n+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S\t0x5\n+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M\t\\\n+\t(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)\n+\tu8 flags; /* Special field, reserved for some aq calls */\n+#define ICE_AQC_ELEM_FLAG_SUSPEND_M\t\t0x1\n+\tstruct ice_aqc_elem_info_bw cir_bw;\n+\tstruct ice_aqc_elem_info_bw eir_bw;\n+\t__le16 srl_id;\n+\t__le16 reserved2;\n+};\n+\n+struct ice_aqc_txsched_elem_data {\n+\t__le32 parent_teid;\n+\t__le32 node_teid;\n+\tstruct ice_aqc_txsched_elem data;\n+};\n+\n+struct ice_aqc_txsched_topo_grp_info_hdr {\n+\t__le32 parent_teid;\n+\t__le16 num_elems;\n+\t__le16 reserved2;\n+};\n+\n+struct ice_aqc_delete_elem {\n+\tstruct ice_aqc_txsched_topo_grp_info_hdr hdr;\n+\t__le32 teid[1];\n+};\n+\n+/* Query Scheduler Resource Allocation (indirect 0x0412)\n+ * This indirect command retrieves the scheduler resources allocated by\n+ * EMP Firmware to the given PF.\n+ */\n+struct ice_aqc_query_txsched_res {\n+\tu8 reserved[8];\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+struct ice_aqc_generic_sched_props {\n+\t__le16 phys_levels;\n+\t__le16 logical_levels;\n+\tu8 flattening_bitmap;\n+\tu8 max_device_cgds;\n+\tu8 max_pf_cgds;\n+\tu8 rsvd0;\n+\t__le16 rdma_qsets;\n+\tu8 rsvd1[22];\n+};\n+\n+struct ice_aqc_layer_props {\n+\tu8 logical_layer;\n+\tu8 chunk_size;\n+\t__le16 max_device_nodes;\n+\t__le16 max_pf_nodes;\n+\tu8 rsvd0[2];\n+\t__le16 max_shared_rate_lmtr;\n+\t__le16 max_children;\n+\t__le16 max_cir_rl_profiles;\n+\t__le16 max_eir_rl_profiles;\n+\t__le16 max_srl_profiles;\n+\tu8 rsvd1[14];\n+};\n+\n+struct ice_aqc_query_txsched_res_resp {\n+\tstruct ice_aqc_generic_sched_props sched_props;\n+\tstruct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM];\n+};\n+\n /* NVM Read command (indirect 0x0701)\n * NVM Erase commands (direct 0x0702)\n * NVM Update commands (indirect 0x0703)\n@@ -142,6 +333,10 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_q_shutdown q_shutdown;\n \t\tstruct ice_aqc_req_res res_owner;\n \t\tstruct ice_aqc_clear_pxe clear_pxe;\n+\t\tstruct ice_aqc_list_caps get_cap;\n+\t\tstruct 
ice_aqc_get_sw_cfg get_sw_conf;\n+\t\tstruct ice_aqc_query_txsched_res query_sched_res;\n+\t\tstruct ice_aqc_add_move_delete_elem add_move_delete_elem;\n \t\tstruct ice_aqc_nvm nvm;\n \t} params;\n };\n@@ -150,16 +345,19 @@ struct ice_aq_desc {\n #define ICE_AQ_LG_BUF\t512\n \n #define ICE_AQ_FLAG_LB_S\t9\n+#define ICE_AQ_FLAG_RD_S\t10\n #define ICE_AQ_FLAG_BUF_S\t12\n #define ICE_AQ_FLAG_SI_S\t13\n \n #define ICE_AQ_FLAG_LB\t\tBIT(ICE_AQ_FLAG_LB_S) /* 0x200 */\n+#define ICE_AQ_FLAG_RD\t\tBIT(ICE_AQ_FLAG_RD_S) /* 0x400 */\n #define ICE_AQ_FLAG_BUF\t\tBIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */\n #define ICE_AQ_FLAG_SI\t\tBIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */\n \n /* error codes */\n enum ice_aq_err {\n \tICE_AQ_RC_OK\t\t= 0, /* success */\n+\tICE_AQ_RC_ENOMEM\t= 9, /* Out of memory */\n \tICE_AQ_RC_EBUSY\t\t= 12, /* Device or resource busy */\n \tICE_AQ_RC_EEXIST\t= 13, /* object already exists */\n };\n@@ -174,11 +372,22 @@ enum ice_adminq_opc {\n \tice_aqc_opc_req_res\t\t\t\t= 0x0008,\n \tice_aqc_opc_release_res\t\t\t\t= 0x0009,\n \n+\t/* device/function capabilities */\n+\tice_aqc_opc_list_func_caps\t\t\t= 0x000A,\n+\tice_aqc_opc_list_dev_caps\t\t\t= 0x000B,\n+\n \t/* PXE */\n \tice_aqc_opc_clear_pxe_mode\t\t\t= 0x0110,\n \n+\t/* internal switch commands */\n+\tice_aqc_opc_get_sw_cfg\t\t\t\t= 0x0200,\n+\n \tice_aqc_opc_clear_pf_cfg\t\t\t= 0x02A4,\n \n+\t/* transmit scheduler commands */\n+\tice_aqc_opc_delete_sched_elems\t\t\t= 0x040F,\n+\tice_aqc_opc_query_sched_res\t\t\t= 0x0412,\n+\n \t/* NVM commands */\n \tice_aqc_opc_nvm_read\t\t\t\t= 0x0701,\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex 04cee856d456..10f8b2ce5d44 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -16,6 +16,7 @@\n */\n \n #include \"ice_common.h\"\n+#include \"ice_sched.h\"\n #include \"ice_adminq_cmd.h\"\n \n #define ICE_PF_RESET_WAIT_COUNT\t200\n@@ -84,8 +85,37 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n \tif (status)\n \t\tgoto err_unroll_cqinit;\n \n+\tstatus = ice_get_caps(hw);\n+\tif (status)\n+\t\tgoto err_unroll_cqinit;\n+\n+\thw->port_info = devm_kzalloc(ice_hw_to_dev(hw),\n+\t\t\t\t sizeof(*hw->port_info), GFP_KERNEL);\n+\tif (!hw->port_info) {\n+\t\tstatus = ICE_ERR_NO_MEMORY;\n+\t\tgoto err_unroll_cqinit;\n+\t}\n+\n+\t/* set the back pointer to hw */\n+\thw->port_info->hw = hw;\n+\n+\t/* Initialize port_info struct with switch configuration data */\n+\tstatus = ice_get_initial_sw_cfg(hw);\n+\tif (status)\n+\t\tgoto err_unroll_alloc;\n+\n+\t/* Query the allocated resources for tx scheduler */\n+\tstatus = ice_sched_query_res_alloc(hw);\n+\tif (status) {\n+\t\tice_debug(hw, ICE_DBG_SCHED,\n+\t\t\t \"Failed to get scheduler allocated resources\\n\");\n+\t\tgoto err_unroll_alloc;\n+\t}\n+\n \treturn 0;\n \n+err_unroll_alloc:\n+\tdevm_kfree(ice_hw_to_dev(hw), hw->port_info);\n err_unroll_cqinit:\n \tice_shutdown_all_ctrlq(hw);\n \treturn status;\n@@ -97,7 +127,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n */\n void ice_deinit_hw(struct ice_hw *hw)\n {\n+\tice_sched_cleanup_all(hw);\n \tice_shutdown_all_ctrlq(hw);\n+\tif (hw->port_info) {\n+\t\tdevm_kfree(ice_hw_to_dev(hw), hw->port_info);\n+\t\thw->port_info = NULL;\n+\t}\n }\n \n /**\n@@ -519,6 +554,202 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)\n \t}\n }\n \n+/**\n+ * ice_parse_caps - parse function/device capabilities\n+ * @hw: pointer to the hw struct\n+ * @buf: pointer to a buffer containing 
function/device capability records\n+ * @cap_count: number of capability records in the list\n+ * @opc: type of capabilities list to parse\n+ *\n+ * Helper function to parse function(0x000a)/device(0x000b) capabilities list.\n+ */\n+static void\n+ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,\n+\t enum ice_adminq_opc opc)\n+{\n+\tstruct ice_aqc_list_caps_elem *cap_resp;\n+\tstruct ice_hw_func_caps *func_p = NULL;\n+\tstruct ice_hw_dev_caps *dev_p = NULL;\n+\tstruct ice_hw_common_caps *caps;\n+\tu32 i;\n+\n+\tif (!buf)\n+\t\treturn;\n+\n+\tcap_resp = (struct ice_aqc_list_caps_elem *)buf;\n+\n+\tif (opc == ice_aqc_opc_list_dev_caps) {\n+\t\tdev_p = &hw->dev_caps;\n+\t\tcaps = &dev_p->common_cap;\n+\t} else if (opc == ice_aqc_opc_list_func_caps) {\n+\t\tfunc_p = &hw->func_caps;\n+\t\tcaps = &func_p->common_cap;\n+\t} else {\n+\t\tice_debug(hw, ICE_DBG_INIT, \"wrong opcode\\n\");\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; caps && i < cap_count; i++, cap_resp++) {\n+\t\tu32 logical_id = le32_to_cpu(cap_resp->logical_id);\n+\t\tu32 phys_id = le32_to_cpu(cap_resp->phys_id);\n+\t\tu32 number = le32_to_cpu(cap_resp->number);\n+\t\tu16 cap = le16_to_cpu(cap_resp->cap);\n+\n+\t\tswitch (cap) {\n+\t\tcase ICE_AQC_CAPS_VSI:\n+\t\t\tif (dev_p) {\n+\t\t\t\tdev_p->num_vsi_allocd_to_host = number;\n+\t\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t\t \"HW caps: Dev.VSI cnt = %d\\n\",\n+\t\t\t\t\t dev_p->num_vsi_allocd_to_host);\n+\t\t\t} else if (func_p) {\n+\t\t\t\tfunc_p->guaranteed_num_vsi = number;\n+\t\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t\t \"HW caps: Func.VSI cnt = %d\\n\",\n+\t\t\t\t\t func_p->guaranteed_num_vsi);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase ICE_AQC_CAPS_RSS:\n+\t\t\tcaps->rss_table_size = number;\n+\t\t\tcaps->rss_table_entry_width = logical_id;\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: RSS table size = %d\\n\",\n+\t\t\t\t caps->rss_table_size);\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: RSS table width = %d\\n\",\n+\t\t\t\t caps->rss_table_entry_width);\n+\t\t\tbreak;\n+\t\tcase ICE_AQC_CAPS_RXQS:\n+\t\t\tcaps->num_rxq = number;\n+\t\t\tcaps->rxq_first_id = phys_id;\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: Num Rx Qs = %d\\n\", caps->num_rxq);\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: Rx first queue ID = %d\\n\",\n+\t\t\t\t caps->rxq_first_id);\n+\t\t\tbreak;\n+\t\tcase ICE_AQC_CAPS_TXQS:\n+\t\t\tcaps->num_txq = number;\n+\t\t\tcaps->txq_first_id = phys_id;\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: Num Tx Qs = %d\\n\", caps->num_txq);\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: Tx first queue ID = %d\\n\",\n+\t\t\t\t caps->txq_first_id);\n+\t\t\tbreak;\n+\t\tcase ICE_AQC_CAPS_MSIX:\n+\t\t\tcaps->num_msix_vectors = number;\n+\t\t\tcaps->msix_vector_first_id = phys_id;\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: MSIX vector count = %d\\n\",\n+\t\t\t\t caps->num_msix_vectors);\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: MSIX first vector index = %d\\n\",\n+\t\t\t\t caps->msix_vector_first_id);\n+\t\t\tbreak;\n+\t\tcase ICE_AQC_CAPS_MAX_MTU:\n+\t\t\tcaps->max_mtu = number;\n+\t\t\tif (dev_p)\n+\t\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t\t \"HW caps: Dev.MaxMTU = %d\\n\",\n+\t\t\t\t\t caps->max_mtu);\n+\t\t\telse if (func_p)\n+\t\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t\t \"HW caps: func.MaxMTU = %d\\n\",\n+\t\t\t\t\t caps->max_mtu);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tice_debug(hw, ICE_DBG_INIT,\n+\t\t\t\t \"HW caps: Unknown capability[%d]: 0x%x\\n\", 
i,\n+\t\t\t\t cap);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * ice_aq_discover_caps - query function/device capabilities\n+ * @hw: pointer to the hw struct\n+ * @buf: a virtual buffer to hold the capabilities\n+ * @buf_size: Size of the virtual buffer\n+ * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM\n+ * @opc: capabilities type to discover - pass in the command opcode\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Get the function(0x000a)/device(0x000b) capabilities description from\n+ * the firmware.\n+ */\n+static enum ice_status\n+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,\n+\t\t enum ice_adminq_opc opc, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_list_caps *cmd;\n+\tstruct ice_aq_desc desc;\n+\tenum ice_status status;\n+\n+\tcmd = &desc.params.get_cap;\n+\n+\tif (opc != ice_aqc_opc_list_func_caps &&\n+\t opc != ice_aqc_opc_list_dev_caps)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, opc);\n+\n+\tstatus = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);\n+\tif (!status)\n+\t\tice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);\n+\t*data_size = le16_to_cpu(desc.datalen);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * ice_get_caps - get info about the HW\n+ * @hw: pointer to the hardware structure\n+ */\n+enum ice_status ice_get_caps(struct ice_hw *hw)\n+{\n+\tenum ice_status status;\n+\tu16 data_size = 0;\n+\tu16 cbuf_len;\n+\tu8 retries;\n+\n+\t/* The driver doesn't know how many capabilities the device will return\n+\t * so the buffer size required isn't known ahead of time. The driver\n+\t * starts with cbuf_len and if this turns out to be insufficient, the\n+\t * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.\n+\t * The driver then allocates the buffer of this size and retries the\n+\t * operation. 
So it follows that the retry count is 2.\n+\t */\n+#define ICE_GET_CAP_BUF_COUNT\t40\n+#define ICE_GET_CAP_RETRY_COUNT\t2\n+\n+\tcbuf_len = ICE_GET_CAP_BUF_COUNT *\n+\t\tsizeof(struct ice_aqc_list_caps_elem);\n+\n+\tretries = ICE_GET_CAP_RETRY_COUNT;\n+\n+\tdo {\n+\t\tvoid *cbuf;\n+\n+\t\tcbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);\n+\t\tif (!cbuf)\n+\t\t\treturn ICE_ERR_NO_MEMORY;\n+\n+\t\tstatus = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,\n+\t\t\t\t\t ice_aqc_opc_list_func_caps, NULL);\n+\t\tdevm_kfree(ice_hw_to_dev(hw), cbuf);\n+\n+\t\tif (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)\n+\t\t\tbreak;\n+\n+\t\t/* If ENOMEM is returned, try again with bigger buffer */\n+\t\tcbuf_len = data_size;\n+\t} while (--retries);\n+\n+\treturn status;\n+}\n+\n /**\n * ice_aq_clear_pxe_mode\n * @hw: pointer to the hw struct\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex 0876fd98090a..63ca2a26a274 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -20,6 +20,7 @@\n \n #include \"ice.h\"\n #include \"ice_type.h\"\n+#include \"ice_switch.h\"\n \n void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,\n \t\t u16 buf_len);\n@@ -39,6 +40,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,\n \t\tstruct ice_aq_desc *desc, void *buf, u16 buf_size,\n \t\tstruct ice_sq_cd *cd);\n void ice_clear_pxe_mode(struct ice_hw *hw);\n+enum ice_status ice_get_caps(struct ice_hw *hw);\n bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);\n enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);\n void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c\nnew file mode 100644\nindex 000000000000..66e48ae4a3ed\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.c\n@@ -0,0 +1,354 @@\n+// SPDX-License-Identifier: GPL-2.0-only\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+#include \"ice_sched.h\"\n+\n+/**\n+ * ice_aq_delete_sched_elems - delete scheduler elements\n+ * @hw: pointer to the hw struct\n+ * @grps_req: number of groups to delete\n+ * @buf: pointer to buffer\n+ * @buf_size: buffer size in bytes\n+ * @grps_del: returns total number of elements deleted\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Delete scheduling elements (0x040F)\n+ */\n+static enum ice_status\n+ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,\n+\t\t\t struct ice_aqc_delete_elem *buf, u16 buf_size,\n+\t\t\t u16 *grps_del, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_add_move_delete_elem *cmd;\n+\tstruct ice_aq_desc desc;\n+\tenum ice_status status;\n+\n+\tcmd = &desc.params.add_move_delete_elem;\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\tcmd->num_grps_req = cpu_to_le16(grps_req);\n+\n+\tstatus = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);\n+\tif (!status && grps_del)\n+\t\t*grps_del = le16_to_cpu(cmd->num_grps_updated);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_remove_elems - remove nodes from hw\n+ * @hw: pointer to the hw struct\n+ * @parent: pointer to the parent node\n+ * @num_nodes: number of nodes\n+ * @node_teids: array of node teids to be deleted\n+ *\n+ * This function remove nodes from hw\n+ */\n+static enum ice_status\n+ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,\n+\t\t u16 num_nodes, u32 *node_teids)\n+{\n+\tstruct ice_aqc_delete_elem *buf;\n+\tu16 i, num_groups_removed = 0;\n+\tenum ice_status status;\n+\tu16 buf_size;\n+\n+\tbuf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);\n+\tbuf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);\n+\tif (!buf)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\tbuf->hdr.parent_teid = parent->info.node_teid;\n+\tbuf->hdr.num_elems = cpu_to_le16(num_nodes);\n+\tfor (i = 0; i < num_nodes; i++)\n+\t\tbuf->teid[i] = cpu_to_le32(node_teids[i]);\n+\tstatus = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,\n+\t\t\t\t\t &num_groups_removed, NULL);\n+\tif (status || num_groups_removed != 1)\n+\t\tice_debug(hw, ICE_DBG_SCHED, \"remove elements failed\\n\");\n+\tdevm_kfree(ice_hw_to_dev(hw), buf);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_get_first_node - get the first node of the given layer\n+ * @hw: pointer to the hw struct\n+ * @parent: pointer the base node of the subtree\n+ * @layer: layer number\n+ *\n+ * This function retrieves the first node of the given layer from the subtree\n+ */\n+static struct ice_sched_node *\n+ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,\n+\t\t\t u8 layer)\n+{\n+\tu8 i;\n+\n+\tif (layer < hw->sw_entry_point_layer)\n+\t\treturn NULL;\n+\tfor (i = 0; i < parent->num_children; i++) {\n+\t\tstruct ice_sched_node *node = parent->children[i];\n+\n+\t\tif (node) {\n+\t\t\tif (node->tx_sched_layer == layer)\n+\t\t\t\treturn node;\n+\t\t\t/* this recursion is intentional, and wouldn't\n+\t\t\t * go more than 9 calls\n+\t\t\t */\n+\t\t\treturn ice_sched_get_first_node(hw, node, layer);\n+\t\t}\n+\t}\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_sched_get_tc_node - get pointer to TC node\n+ * @pi: port information structure\n+ * @tc: TC number\n+ *\n+ * This function returns the TC node pointer\n+ */\n+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info 
*pi, u8 tc)\n+{\n+\tu8 i;\n+\n+\tif (!pi)\n+\t\treturn NULL;\n+\tfor (i = 0; i < pi->root->num_children; i++)\n+\t\tif (pi->root->children[i]->tc_num == tc)\n+\t\t\treturn pi->root->children[i];\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_free_sched_node - Free a Tx scheduler node from SW DB\n+ * @pi: port information structure\n+ * @node: pointer to the ice_sched_node struct\n+ *\n+ * This function frees up a node from SW DB as well as from HW\n+ *\n+ * This function needs to be called with the port_info->sched_lock held\n+ */\n+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)\n+{\n+\tstruct ice_sched_node *parent;\n+\tstruct ice_hw *hw = pi->hw;\n+\tu8 i, j;\n+\n+\t/* Free the children before freeing up the parent node\n+\t * The parent array is updated below and that shifts the nodes\n+\t * in the array. So always pick the first child if num children > 0\n+\t */\n+\twhile (node->num_children)\n+\t\tice_free_sched_node(pi, node->children[0]);\n+\n+\t/* Leaf, TC and root nodes can't be deleted by SW */\n+\tif (node->tx_sched_layer >= hw->sw_entry_point_layer &&\n+\t node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&\n+\t node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&\n+\t node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {\n+\t\tu32 teid = le32_to_cpu(node->info.node_teid);\n+\t\tenum ice_status status;\n+\n+\t\tstatus = ice_sched_remove_elems(hw, node->parent, 1, &teid);\n+\t\tif (status)\n+\t\t\tice_debug(hw, ICE_DBG_SCHED,\n+\t\t\t\t \"remove element failed %d\\n\", status);\n+\t}\n+\tparent = node->parent;\n+\t/* root has no parent */\n+\tif (parent) {\n+\t\tstruct ice_sched_node *p, *tc_node;\n+\n+\t\t/* update the parent */\n+\t\tfor (i = 0; i < parent->num_children; i++)\n+\t\t\tif (parent->children[i] == node) {\n+\t\t\t\tfor (j = i + 1; j < parent->num_children; j++)\n+\t\t\t\t\tparent->children[j - 1] =\n+\t\t\t\t\t\tparent->children[j];\n+\t\t\t\tparent->num_children--;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t/* search for previous sibling that points to this node and\n+\t\t * remove the reference\n+\t\t */\n+\t\ttc_node = ice_sched_get_tc_node(pi, node->tc_num);\n+\t\tif (!tc_node) {\n+\t\t\tice_debug(hw, ICE_DBG_SCHED,\n+\t\t\t\t \"Invalid TC number %d\\n\", node->tc_num);\n+\t\t\tgoto err_exit;\n+\t\t}\n+\t\tp = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);\n+\t\twhile (p) {\n+\t\t\tif (p->sibling == node) {\n+\t\t\t\tp->sibling = node->sibling;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tp = p->sibling;\n+\t\t}\n+\t}\n+err_exit:\n+\t/* leaf nodes have no children */\n+\tif (node->children)\n+\t\tdevm_kfree(ice_hw_to_dev(hw), node->children);\n+\tdevm_kfree(ice_hw_to_dev(hw), node);\n+}\n+\n+/**\n+ * ice_aq_query_sched_res - query scheduler resource\n+ * @hw: pointer to the hw struct\n+ * @buf_size: buffer size in bytes\n+ * @buf: pointer to buffer\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Query scheduler resource allocation (0x0412)\n+ */\n+static enum ice_status\n+ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,\n+\t\t struct ice_aqc_query_txsched_res_resp *buf,\n+\t\t struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aq_desc desc;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);\n+\treturn ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);\n+}\n+\n+/**\n+ * ice_sched_clear_tx_topo - clears the schduler tree nodes\n+ * @pi: port information structure\n+ *\n+ * This function removes all the nodes from HW as well as from SW DB.\n+ */\n+static void ice_sched_clear_tx_topo(struct 
ice_port_info *pi)\n+{\n+\tstruct ice_sched_agg_info *agg_info;\n+\tstruct ice_sched_vsi_info *vsi_elem;\n+\tstruct ice_sched_agg_info *atmp;\n+\tstruct ice_sched_vsi_info *tmp;\n+\tstruct ice_hw *hw;\n+\n+\tif (!pi)\n+\t\treturn;\n+\n+\thw = pi->hw;\n+\n+\tlist_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {\n+\t\tstruct ice_sched_agg_vsi_info *agg_vsi_info;\n+\t\tstruct ice_sched_agg_vsi_info *vtmp;\n+\n+\t\tlist_for_each_entry_safe(agg_vsi_info, vtmp,\n+\t\t\t\t\t &agg_info->agg_vsi_list, list_entry) {\n+\t\t\tlist_del(&agg_vsi_info->list_entry);\n+\t\t\tdevm_kfree(ice_hw_to_dev(hw), agg_vsi_info);\n+\t\t}\n+\t}\n+\n+\t/* remove the vsi list */\n+\tlist_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,\n+\t\t\t\t list_entry) {\n+\t\tlist_del(&vsi_elem->list_entry);\n+\t\tdevm_kfree(ice_hw_to_dev(hw), vsi_elem);\n+\t}\n+\n+\tif (pi->root) {\n+\t\tice_free_sched_node(pi, pi->root);\n+\t\tpi->root = NULL;\n+\t}\n+}\n+\n+/**\n+ * ice_sched_clear_port - clear the scheduler elements from SW DB for a port\n+ * @pi: port information structure\n+ *\n+ * Cleanup scheduling elements from SW DB\n+ */\n+static void ice_sched_clear_port(struct ice_port_info *pi)\n+{\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn;\n+\n+\tpi->port_state = ICE_SCHED_PORT_STATE_INIT;\n+\tmutex_lock(&pi->sched_lock);\n+\tice_sched_clear_tx_topo(pi);\n+\tmutex_unlock(&pi->sched_lock);\n+\tmutex_destroy(&pi->sched_lock);\n+}\n+\n+/**\n+ * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports\n+ * @hw: pointer to the hw struct\n+ *\n+ * Cleanup scheduling elements from SW DB for all the ports\n+ */\n+void ice_sched_cleanup_all(struct ice_hw *hw)\n+{\n+\tif (!hw || !hw->port_info)\n+\t\treturn;\n+\n+\tif (hw->layer_info)\n+\t\tdevm_kfree(ice_hw_to_dev(hw), hw->layer_info);\n+\n+\tice_sched_clear_port(hw->port_info);\n+\n+\thw->num_tx_sched_layers = 0;\n+\thw->num_tx_sched_phys_layers = 0;\n+\thw->flattened_layers = 0;\n+\thw->max_cgds = 0;\n+}\n+\n+/**\n+ * ice_sched_query_res_alloc - query the FW for num of logical sched layers\n+ * @hw: pointer to the HW struct\n+ *\n+ * query FW for allocated scheduler resources and store in HW struct\n+ */\n+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)\n+{\n+\tstruct ice_aqc_query_txsched_res_resp *buf;\n+\tenum ice_status status = 0;\n+\n+\tif (hw->layer_info)\n+\t\treturn status;\n+\n+\tbuf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);\n+\tif (!buf)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\n+\tstatus = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);\n+\tif (status)\n+\t\tgoto sched_query_out;\n+\n+\thw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);\n+\thw->num_tx_sched_phys_layers =\n+\t\tle16_to_cpu(buf->sched_props.phys_levels);\n+\thw->flattened_layers = buf->sched_props.flattening_bitmap;\n+\thw->max_cgds = buf->sched_props.max_pf_cgds;\n+\n+\t hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,\n+\t\t\t\t (hw->num_tx_sched_layers *\n+\t\t\t\t\tsizeof(*hw->layer_info)),\n+\t\t\t\t GFP_KERNEL);\n+\tif (!hw->layer_info) {\n+\t\tstatus = ICE_ERR_NO_MEMORY;\n+\t\tgoto sched_query_out;\n+\t}\n+\n+sched_query_out:\n+\tdevm_kfree(ice_hw_to_dev(hw), buf);\n+\treturn status;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h\nnew file mode 100644\nindex 000000000000..fb93acf340ed\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.h\n@@ -0,0 +1,42 @@\n+/* SPDX-License-Identifier: 
GPL-2.0-only */\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+#ifndef _ICE_SCHED_H_\n+#define _ICE_SCHED_H_\n+\n+#include \"ice_common.h\"\n+\n+struct ice_sched_agg_vsi_info {\n+\tstruct list_head list_entry;\n+\tDECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);\n+\tu16 vsi_id;\n+};\n+\n+struct ice_sched_agg_info {\n+\tstruct list_head agg_vsi_list;\n+\tstruct list_head list_entry;\n+\tDECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);\n+\tu32 agg_id;\n+\tenum ice_agg_type agg_type;\n+};\n+\n+/* FW AQ command calls */\n+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);\n+void ice_sched_cleanup_all(struct ice_hw *hw);\n+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);\n+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);\n+#endif /* _ICE_SCHED_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c\nnew file mode 100644\nindex 000000000000..5824a1e57a17\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.c\n@@ -0,0 +1,158 @@\n+// SPDX-License-Identifier: GPL-2.0-only\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+#include \"ice_switch.h\"\n+\n+/**\n+ * ice_aq_get_sw_cfg - get switch configuration\n+ * @hw: pointer to the hardware structure\n+ * @buf: pointer to the result buffer\n+ * @buf_size: length of the buffer available for response\n+ * @req_desc: pointer to requested descriptor\n+ * @num_elems: pointer to number of elements\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Get switch configuration (0x0200) to be placed in 'buff'.\n+ * This admin command returns information such as initial VSI/port number\n+ * and switch ID it belongs to.\n+ *\n+ * NOTE: *req_desc is both an input/output parameter.\n+ * The caller of this function first calls this function with *request_desc set\n+ * to 0. 
If the response from f/w has *req_desc set to 0, all the switch\n+ * configuration information has been returned; if non-zero (meaning not all\n+ * the information was returned), the caller should call this function again\n+ * with *req_desc set to the previous value returned by f/w to get the\n+ * next block of switch configuration information.\n+ *\n+ * *num_elems is output only parameter. This reflects the number of elements\n+ * in response buffer. The caller of this function to use *num_elems while\n+ * parsing the response buffer.\n+ */\n+static enum ice_status\n+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,\n+\t\t u16 buf_size, u16 *req_desc, u16 *num_elems,\n+\t\t struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_get_sw_cfg *cmd;\n+\tenum ice_status status;\n+\tstruct ice_aq_desc desc;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);\n+\tcmd = &desc.params.get_sw_conf;\n+\tcmd->element = cpu_to_le16(*req_desc);\n+\n+\tstatus = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);\n+\tif (!status) {\n+\t\t*req_desc = le16_to_cpu(cmd->element);\n+\t\t*num_elems = le16_to_cpu(cmd->num_elems);\n+\t}\n+\n+\treturn status;\n+}\n+\n+/* ice_init_port_info - Initialize port_info with switch configuration data\n+ * @pi: pointer to port_info\n+ * @vsi_port_num: VSI number or port number\n+ * @type: Type of switch element (port or VSI)\n+ * @swid: switch ID of the switch the element is attached to\n+ * @pf_vf_num: PF or VF number\n+ * @is_vf: true if the element is a VF, false otherwise\n+ */\n+static void\n+ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,\n+\t\t u16 swid, u16 pf_vf_num, bool is_vf)\n+{\n+\tswitch (type) {\n+\tcase ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:\n+\t\tpi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);\n+\t\tpi->sw_id = swid;\n+\t\tpi->pf_vf_num = pf_vf_num;\n+\t\tpi->is_vf = is_vf;\n+\t\tpi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;\n+\t\tpi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;\n+\t\tbreak;\n+\tdefault:\n+\t\tice_debug(pi->hw, ICE_DBG_SW,\n+\t\t\t \"incorrect VSI/port type received\\n\");\n+\t\tbreak;\n+\t}\n+}\n+\n+/* ice_get_initial_sw_cfg - Get initial port and default VSI data\n+ * @hw: pointer to the hardware structure\n+ */\n+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)\n+{\n+\tstruct ice_aqc_get_sw_cfg_resp *rbuf;\n+\tenum ice_status status;\n+\tu16 req_desc = 0;\n+\tu16 num_elems;\n+\tu16 i;\n+\n+\trbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,\n+\t\t\t GFP_KERNEL);\n+\n+\tif (!rbuf)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\n+\t/* Multiple calls to ice_aq_get_sw_cfg may be required\n+\t * to get all the switch configuration information. 
The need\n+\t * for additional calls is indicated by ice_aq_get_sw_cfg\n+\t * writing a non-zero value in req_desc\n+\t */\n+\tdo {\n+\t\tstatus = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,\n+\t\t\t\t\t &req_desc, &num_elems, NULL);\n+\n+\t\tif (status)\n+\t\t\tbreak;\n+\n+\t\tfor (i = 0; i < num_elems; i++) {\n+\t\t\tstruct ice_aqc_get_sw_cfg_resp_elem *ele;\n+\t\t\tu16 pf_vf_num, swid, vsi_port_num;\n+\t\t\tbool is_vf = false;\n+\t\t\tu8 type;\n+\n+\t\t\tele = rbuf[i].elements;\n+\t\t\tvsi_port_num = le16_to_cpu(ele->vsi_port_num) &\n+\t\t\t\tICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;\n+\n+\t\t\tpf_vf_num = le16_to_cpu(ele->pf_vf_num) &\n+\t\t\t\tICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;\n+\n+\t\t\tswid = le16_to_cpu(ele->swid);\n+\n+\t\t\tif (le16_to_cpu(ele->pf_vf_num) &\n+\t\t\t ICE_AQC_GET_SW_CONF_RESP_IS_VF)\n+\t\t\t\tis_vf = true;\n+\n+\t\t\ttype = le16_to_cpu(ele->vsi_port_num) >>\n+\t\t\t\tICE_AQC_GET_SW_CONF_RESP_TYPE_S;\n+\n+\t\t\tif (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {\n+\t\t\t\t/* FW VSI is not needed. Just continue. */\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\tice_init_port_info(hw->port_info, vsi_port_num,\n+\t\t\t\t\t type, swid, pf_vf_num, is_vf);\n+\t\t}\n+\t} while (req_desc && !status);\n+\n+\tdevm_kfree(ice_hw_to_dev(hw), (void *)rbuf);\n+\treturn status;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h\nnew file mode 100644\nindex 000000000000..57d10e58e0b2\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.h\n@@ -0,0 +1,28 @@\n+/* SPDX-License-Identifier: GPL-2.0-only */\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+#ifndef _ICE_SWITCH_H_\n+#define _ICE_SWITCH_H_\n+\n+#include \"ice_common.h\"\n+\n+#define ICE_SW_CFG_MAX_BUF_LEN 2048\n+#define ICE_DFLT_VSI_INVAL 0xff\n+\n+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);\n+\n+#endif /* _ICE_SWITCH_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex e40fab48cf7f..69a8c1a5ce84 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -26,6 +26,8 @@\n /* debug masks - set these bits in hw->debug_mask to control output */\n #define ICE_DBG_INIT\t\tBIT_ULL(1)\n #define ICE_DBG_NVM\t\tBIT_ULL(7)\n+#define ICE_DBG_SW\t\tBIT_ULL(13)\n+#define ICE_DBG_SCHED\t\tBIT_ULL(14)\n #define ICE_DBG_RES\t\tBIT_ULL(17)\n #define ICE_DBG_AQ_MSG\t\tBIT_ULL(24)\n #define ICE_DBG_AQ_CMD\t\tBIT_ULL(27)\n@@ -48,6 +50,38 @@ enum ice_mac_type {\n \tICE_MAC_GENERIC,\n };\n \n+/* Common HW capabilities for SW use */\n+struct ice_hw_common_caps {\n+\t/* TX/RX queues */\n+\tu16 num_rxq;\t\t/* Number/Total RX queues */\n+\tu16 rxq_first_id;\t/* First queue ID for RX queues */\n+\tu16 num_txq;\t\t/* Number/Total TX queues */\n+\tu16 txq_first_id;\t/* First queue ID for TX queues */\n+\n+\t/* MSI-X vectors */\n+\tu16 num_msix_vectors;\n+\tu16 msix_vector_first_id;\n+\n+\t/* Max MTU for function or device */\n+\tu16 max_mtu;\n+\n+\t/* RSS related capabilities */\n+\tu16 rss_table_size;\t\t/* 512 for PFs and 64 for VFs */\n+\tu8 rss_table_entry_width;\t/* RSS Entry width in bits */\n+};\n+\n+/* Function specific capabilities */\n+struct ice_hw_func_caps {\n+\tstruct ice_hw_common_caps common_cap;\n+\tu32 guaranteed_num_vsi;\n+};\n+\n+/* Device wide capabilities */\n+struct ice_hw_dev_caps {\n+\tstruct ice_hw_common_caps common_cap;\n+\tu32 num_vsi_allocd_to_host;\t/* Excluding EMP VSI */\n+};\n+\n /* Various RESET request, These are not tied with HW reset types */\n enum ice_reset_req {\n \tICE_RESET_PFR\t= 0,\n@@ -70,10 +104,76 @@ struct ice_nvm_info {\n \tbool blank_nvm_mode; /* is NVM empty (no FW present) */\n };\n \n+/* Max number of port to queue branches w.r.t topology */\n+#define ICE_MAX_TRAFFIC_CLASS 8\n+\n+struct ice_sched_node {\n+\tstruct ice_sched_node *parent;\n+\tstruct ice_sched_node *sibling; /* next sibling in the same layer */\n+\tstruct ice_sched_node **children;\n+\tstruct ice_aqc_txsched_elem_data info;\n+\tu32 agg_id;\t\t\t/* aggregator group id */\n+\tu16 vsi_id;\n+\tbool in_use;\t\t\t/* suspended or in use */\n+\tu8 tx_sched_layer;\t\t/* Logical Layer (1-9) */\n+\tu8 num_children;\n+\tu8 tc_num;\n+\tu8 owner;\n+#define ICE_SCHED_NODE_OWNER_LAN\t0\n+};\n+\n+/* The aggregator type determines if identifier is for a VSI group,\n+ * aggregator group, aggregator of queues, or queue group.\n+ */\n+enum ice_agg_type {\n+\tICE_AGG_TYPE_UNKNOWN = 0,\n+\tICE_AGG_TYPE_VSI,\n+\tICE_AGG_TYPE_AGG, /* aggregator */\n+\tICE_AGG_TYPE_Q,\n+\tICE_AGG_TYPE_QG\n+};\n+\n+/* vsi type list entry to locate corresponding vsi/ag nodes */\n+struct ice_sched_vsi_info {\n+\tstruct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];\n+\tstruct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];\n+\tstruct list_head list_entry;\n+\tu16 max_lanq[ICE_MAX_TRAFFIC_CLASS];\n+\tu16 vsi_id;\n+};\n+\n+/* driver defines the policy */\n+struct ice_sched_tx_policy {\n+\tu16 max_num_vsis;\n+\tu8 
max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];\n+\tbool rdma_ena;\n+};\n+\n+struct ice_port_info {\n+\tstruct ice_sched_node *root;\t/* Root Node per Port */\n+\tstruct ice_hw *hw;\t\t/* back pointer to hw instance */\n+\tu16 sw_id;\t\t\t/* Initial switch ID belongs to port */\n+\tu16 pf_vf_num;\n+\tu8 port_state;\n+#define ICE_SCHED_PORT_STATE_INIT\t0x0\n+#define ICE_SCHED_PORT_STATE_READY\t0x1\n+\tu16 dflt_tx_vsi_num;\n+\tu16 dflt_rx_vsi_num;\n+\tstruct mutex sched_lock;\t/* protect access to TXSched tree */\n+\tstruct ice_sched_tx_policy sched_policy;\n+\tstruct list_head vsi_info_list;\n+\tstruct list_head agg_list;\t/* lists all aggregator */\n+\tu8 lport;\n+#define ICE_LPORT_MASK\t\t0xff\n+\tbool is_vf;\n+};\n+\n /* Port hardware description */\n struct ice_hw {\n \tu8 __iomem *hw_addr;\n \tvoid *back;\n+\tstruct ice_aqc_layer_props *layer_info;\n+\tstruct ice_port_info *port_info;\n \tu64 debug_mask;\t\t/* bitmap for debug mask */\n \tenum ice_mac_type mac_type;\n \n@@ -86,8 +186,17 @@ struct ice_hw {\n \n \tu8 pf_id;\t\t/* device profile info */\n \n+\t/* TX Scheduler values */\n+\tu16 num_tx_sched_layers;\n+\tu16 num_tx_sched_phys_layers;\n+\tu8 flattened_layers;\n+\tu8 max_cgds;\n+\tu8 sw_entry_point_layer;\n+\n \tstruct ice_bus_info bus;\n \tstruct ice_nvm_info nvm;\n+\tstruct ice_hw_dev_caps dev_caps;\t/* device capabilities */\n+\tstruct ice_hw_func_caps func_caps;\t/* function capabilities */\n \n \t/* Control Queue info */\n \tstruct ice_ctl_q_info adminq;\n", "prefixes": [ "v2", "04/15" ] }
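
As a usage note, a record like the one above can also be fetched programmatically. The snippet below is a minimal sketch using only the Python standard library; it assumes the usual Django REST framework behaviour of the Patchwork API, where replacing the "?format=api" suffix shown in the request above with "?format=json" returns the same record as machine-readable JSON. The fields accessed are taken directly from the response shown above.

import json
import urllib.request

# Same resource as the GET shown at the top of this page; ?format=json
# selects the JSON rendering instead of the browsable HTML view.
url = "http://patchwork.ozlabs.org/api/patches/886527/?format=json"

with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

# A few of the fields present in the response above.
print(patch["name"])               # "[v2,04/15] ice: Get switch config, ..."
print(patch["state"])              # "superseded"
print(patch["submitter"]["name"])  # "Anirudh Venkataramanan"
print(patch["mbox"])               # URL of the raw mbox for this patch

The "mbox" URL printed last points at the raw patch in mbox form, which is what one would download in order to apply the change to a local tree.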