get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
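
For illustration, a minimal sketch of exercising these methods from Python with the requests library. The base URL and patch ID are taken from the example response below; the token placeholder and the assumption that updates use token authentication (Authorization: Token ...) with maintainer rights on the project are not part of this document and are shown only as an illustration.

    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    PATCH_ID = 1190618

    # Read-only fetch of the patch shown below; no authentication required.
    patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
    print(patch["name"], patch["state"])

    # Partial update of a writable field such as "state". This assumes you
    # hold a Patchwork API token with maintainer rights; the token value
    # below is a placeholder, not a real credential.
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": "Token YOUR_API_TOKEN"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()

PUT is used the same way but performs a full rather than partial update.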

GET /api/patches/1190618/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1190618,
    "url": "http://patchwork.ozlabs.org/api/patches/1190618/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191106100541.48639-2-anthony.l.nguyen@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20191106100541.48639-2-anthony.l.nguyen@intel.com>",
    "list_archive_url": null,
    "date": "2019-11-06T10:05:28",
    "name": "[S32,v3,02/15] ice: Add NDO callback to set the maximum per-queue bitrate",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "119617e1716071614b84c71fda2d4bef51df07a2",
    "submitter": {
        "id": 68875,
        "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api",
        "name": "Tony Nguyen",
        "email": "anthony.l.nguyen@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191106100541.48639-2-anthony.l.nguyen@intel.com/mbox/",
    "series": [
        {
            "id": 141151,
            "url": "http://patchwork.ozlabs.org/api/series/141151/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=141151",
            "date": "2019-11-06T10:05:27",
            "name": "[S32,v3,01/15] ice: Use ice_ena_vsi and ice_dis_vsi in DCB configuration flow",
            "version": 3,
            "mbox": "http://patchwork.ozlabs.org/series/141151/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1190618/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1190618/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.138;\n\thelo=whitealder.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 477Zxz3cKjz9sP4\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu,  7 Nov 2019 05:36:55 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id E9F9489F6F;\n\tWed,  6 Nov 2019 18:36:53 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id ZPl8wVGDYHQe; Wed,  6 Nov 2019 18:36:39 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 657728A0D6;\n\tWed,  6 Nov 2019 18:36:39 +0000 (UTC)",
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id B25041BF3FB\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  6 Nov 2019 18:36:34 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id A00A88A7ED\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  6 Nov 2019 18:36:34 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id RLZMGZ18Ea+Z for <intel-wired-lan@lists.osuosl.org>;\n\tWed,  6 Nov 2019 18:36:28 +0000 (UTC)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id CA9288A7E8\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  6 Nov 2019 18:36:28 +0000 (UTC)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n\tby fmsmga105.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t06 Nov 2019 10:36:28 -0800",
            "from unknown (HELO localhost.jf.intel.com) ([10.166.244.174])\n\tby orsmga006.jf.intel.com with ESMTP; 06 Nov 2019 10:36:27 -0800"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.68,275,1569308400\"; d=\"scan'208\";a=\"205918150\"",
        "From": "Tony Nguyen <anthony.l.nguyen@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Wed,  6 Nov 2019 02:05:28 -0800",
        "Message-Id": "<20191106100541.48639-2-anthony.l.nguyen@intel.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20191106100541.48639-1-anthony.l.nguyen@intel.com>",
        "References": "<20191106100541.48639-1-anthony.l.nguyen@intel.com>",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [PATCH S32 v3 02/15] ice: Add NDO callback to set\n\tthe maximum per-queue bitrate",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Usha Ketineni <usha.k.ketineni@intel.com>\n\nAllow for rate limiting Tx queues. Bitrate is set in\nMbps(megabits per second).\n\nMbps max-rate is set for the queue via sysfs:\n/sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate\nex: echo 100 >/sys/class/net/ens7/queues/tx-0/tx_maxrate\n    echo 200 >/sys/class/net/ens7/queues/tx-1/tx_maxrate\nNote: A value of zero for tx_maxrate means disabled,\ndefault is disabled.\n\nSigned-off-by: Usha Ketineni <usha.k.ketineni@intel.com>\nCo-developed-by: Tarun Singh <tarun.k.singh@intel.com>\nSigned-off-by: Tarun Singh <tarun.k.singh@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\n .../net/ethernet/intel/ice/ice_adminq_cmd.h   |   46 +\n drivers/net/ethernet/intel/ice/ice_common.c   |   10 +-\n drivers/net/ethernet/intel/ice/ice_common.h   |    2 +\n drivers/net/ethernet/intel/ice/ice_dcb_lib.c  |   10 +\n drivers/net/ethernet/intel/ice/ice_dcb_lib.h  |    8 +\n drivers/net/ethernet/intel/ice/ice_main.c     |   43 +\n drivers/net/ethernet/intel/ice/ice_sched.c    | 1264 ++++++++++++++++-\n drivers/net/ethernet/intel/ice/ice_sched.h    |   39 +\n drivers/net/ethernet/intel/ice/ice_switch.h   |    5 -\n drivers/net/ethernet/intel/ice/ice_type.h     |   63 +-\n 10 files changed, 1480 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex f6b4d300543d..5422a2f2c8e0 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -742,6 +742,10 @@ struct ice_aqc_add_elem {\n \tstruct ice_aqc_txsched_elem_data generic[1];\n };\n \n+struct ice_aqc_conf_elem {\n+\tstruct ice_aqc_txsched_elem_data generic[1];\n+};\n+\n struct ice_aqc_get_elem {\n \tstruct ice_aqc_txsched_elem_data generic[1];\n };\n@@ -783,6 +787,44 @@ struct ice_aqc_port_ets_elem {\n \t__le32 tc_node_teid[8]; /* Used for response, reserved in command */\n };\n \n+/* Rate limiting profile for\n+ * Add RL profile (indirect 0x0410)\n+ * Query RL profile (indirect 0x0411)\n+ * Remove RL profile (indirect 0x0415)\n+ * These indirect commands acts on single or multiple\n+ * RL profiles with specified data.\n+ */\n+struct ice_aqc_rl_profile {\n+\t__le16 num_profiles;\n+\t__le16 num_processed; /* Only for response. Reserved in Command. */\n+\tu8 reserved[4];\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+struct ice_aqc_rl_profile_elem {\n+\tu8 level;\n+\tu8 flags;\n+#define ICE_AQC_RL_PROFILE_TYPE_S\t0x0\n+#define ICE_AQC_RL_PROFILE_TYPE_M\t(0x3 << ICE_AQC_RL_PROFILE_TYPE_S)\n+#define ICE_AQC_RL_PROFILE_TYPE_CIR\t0\n+#define ICE_AQC_RL_PROFILE_TYPE_EIR\t1\n+#define ICE_AQC_RL_PROFILE_TYPE_SRL\t2\n+/* The following flag is used for Query RL Profile Data */\n+#define ICE_AQC_RL_PROFILE_INVAL_S\t0x7\n+#define ICE_AQC_RL_PROFILE_INVAL_M\t(0x1 << ICE_AQC_RL_PROFILE_INVAL_S)\n+\n+\t__le16 profile_id;\n+\t__le16 max_burst_size;\n+\t__le16 rl_multiply;\n+\t__le16 wake_up_calc;\n+\t__le16 rl_encode;\n+};\n+\n+struct ice_aqc_rl_profile_generic_elem {\n+\tstruct ice_aqc_rl_profile_elem generic[1];\n+};\n+\n /* Query Scheduler Resource Allocation (indirect 0x0412)\n  * This indirect command retrieves the scheduler resources allocated by\n  * EMP Firmware to the given PF.\n@@ -1659,6 +1701,7 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_sched_elem_cmd sched_elem_cmd;\n \t\tstruct ice_aqc_query_txsched_res query_sched_res;\n \t\tstruct ice_aqc_query_port_ets port_ets;\n+\t\tstruct ice_aqc_rl_profile rl_profile;\n \t\tstruct ice_aqc_nvm nvm;\n \t\tstruct ice_aqc_nvm_checksum nvm_checksum;\n \t\tstruct ice_aqc_pf_vf_msg virt;\n@@ -1761,12 +1804,15 @@ enum ice_adminq_opc {\n \t/* transmit scheduler commands */\n \tice_aqc_opc_get_dflt_topo\t\t\t= 0x0400,\n \tice_aqc_opc_add_sched_elems\t\t\t= 0x0401,\n+\tice_aqc_opc_cfg_sched_elems\t\t\t= 0x0403,\n \tice_aqc_opc_get_sched_elems\t\t\t= 0x0404,\n \tice_aqc_opc_suspend_sched_elems\t\t\t= 0x0409,\n \tice_aqc_opc_resume_sched_elems\t\t\t= 0x040A,\n \tice_aqc_opc_query_port_ets\t\t\t= 0x040E,\n \tice_aqc_opc_delete_sched_elems\t\t\t= 0x040F,\n+\tice_aqc_opc_add_rl_profiles\t\t\t= 0x0410,\n \tice_aqc_opc_query_sched_res\t\t\t= 0x0412,\n+\tice_aqc_opc_remove_rl_profiles\t\t\t= 0x0415,\n \n \t/* PHY commands */\n \tice_aqc_opc_get_phy_caps\t\t\t= 0x0600,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex 3bc794b3376e..eb01c218c596 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -877,6 +877,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n \t\tgoto err_unroll_sched;\n \t}\n \tINIT_LIST_HEAD(&hw->agg_list);\n+\t/* Initialize max burst size */\n+\tif (!hw->max_burst_size)\n+\t\tice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);\n 
\n \tstatus = ice_init_fltr_mgmt_struct(hw);\n \tif (status)\n@@ -3282,7 +3285,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)\n  * @tc: TC number\n  * @q_handle: software queue handle\n  */\n-static struct ice_q_ctx *\n+struct ice_q_ctx *\n ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)\n {\n \tstruct ice_vsi_ctx *vsi;\n@@ -3379,9 +3382,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,\n \tnode.node_teid = buf->txqs[0].q_teid;\n \tnode.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;\n \tq_ctx->q_handle = q_handle;\n+\tq_ctx->q_teid = le32_to_cpu(node.node_teid);\n \n-\t/* add a leaf node into schduler tree queue layer */\n+\t/* add a leaf node into scheduler tree queue layer */\n \tstatus = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);\n+\tif (!status)\n+\t\tstatus = ice_sched_replay_q_bw(pi, q_ctx);\n \n ena_txq_exit:\n \tmutex_unlock(&pi->sched_lock);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex d2491c947629..c17c5724d33b 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -145,6 +145,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,\n enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);\n void ice_replay_post(struct ice_hw *hw);\n void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);\n+struct ice_q_ctx *\n+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);\n void\n ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,\n \t\t  u64 *prev_stat, u64 *cur_stat);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c\nindex baea28c712ee..c00c68bacadb 100644\n--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c\n@@ -101,6 +101,16 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)\n \treturn ret;\n }\n \n+/**\n+ * ice_dcb_get_tc - Get the TC associated with the queue\n+ * @vsi: ptr to the VSI\n+ * @queue_index: queue number associated with VSI\n+ */\n+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)\n+{\n+\treturn vsi->tx_rings[queue_index]->dcb_tc;\n+}\n+\n /**\n  * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC\n  * @vsi: VSI owner of rings being updated\ndiff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h\nindex d11a0aab01ac..59e40cf2dd73 100644\n--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h\n+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h\n@@ -14,6 +14,7 @@\n void ice_dcb_rebuild(struct ice_pf *pf);\n u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);\n u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);\n+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);\n void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);\n int ice_init_pf_dcb(struct ice_pf *pf, bool locked);\n void ice_update_dcb_stats(struct ice_pf *pf);\n@@ -42,6 +43,13 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)\n \treturn 1;\n }\n \n+static inline u8\n+ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,\n+\t       int __always_unused queue_index)\n+{\n+\treturn 0;\n+}\n+\n static inline int\n ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)\n {\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex 
dd05505ccbe1..fec58433bafb 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -3796,6 +3796,48 @@ static void ice_set_rx_mode(struct net_device *netdev)\n \tice_service_task_schedule(vsi->back);\n }\n \n+/**\n+ * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate\n+ * @netdev: network interface device structure\n+ * @queue_index: Queue ID\n+ * @maxrate: maximum bandwidth in Mbps\n+ */\n+static int\n+ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tenum ice_status status;\n+\tu16 q_handle;\n+\tu8 tc;\n+\n+\t/* Validate maxrate requested is within permitted range */\n+\tif (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {\n+\t\tnetdev_err(netdev,\n+\t\t\t   \"Invalid max rate %d specified for the queue %d\\n\",\n+\t\t\t   maxrate, queue_index);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tq_handle = vsi->tx_rings[queue_index]->q_handle;\n+\ttc = ice_dcb_get_tc(vsi, queue_index);\n+\n+\t/* Set BW back to default, when user set maxrate to 0 */\n+\tif (!maxrate)\n+\t\tstatus = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,\n+\t\t\t\t\t       q_handle, ICE_MAX_BW);\n+\telse\n+\t\tstatus = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,\n+\t\t\t\t\t  q_handle, ICE_MAX_BW, maxrate * 1000);\n+\tif (status) {\n+\t\tnetdev_err(netdev,\n+\t\t\t   \"Unable to set Tx max rate, error %d\\n\", status);\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n /**\n  * ice_fdb_add - add an entry to the hardware database\n  * @ndm: the input from the stack\n@@ -5307,6 +5349,7 @@ static const struct net_device_ops ice_netdev_ops = {\n \t.ndo_validate_addr = eth_validate_addr,\n \t.ndo_change_mtu = ice_change_mtu,\n \t.ndo_get_stats64 = ice_get_stats64,\n+\t.ndo_set_tx_maxrate = ice_set_tx_maxrate,\n \t.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,\n \t.ndo_set_vf_mac = ice_set_vf_mac,\n \t.ndo_get_vf_config = ice_get_vf_cfg,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c\nindex fc624b73d05d..dac5a6712610 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.c\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.c\n@@ -410,6 +410,27 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,\n \t\t\t\t\t   grps_added, cd);\n }\n \n+/**\n+ * ice_aq_cfg_sched_elems - configures scheduler elements\n+ * @hw: pointer to the HW struct\n+ * @elems_req: number of elements to configure\n+ * @buf: pointer to buffer\n+ * @buf_size: buffer size in bytes\n+ * @elems_cfgd: returns total number of elements configured\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Configure scheduling elements (0x0403)\n+ */\n+static enum ice_status\n+ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,\n+\t\t       struct ice_aqc_conf_elem *buf, u16 buf_size,\n+\t\t       u16 *elems_cfgd, struct ice_sq_cd *cd)\n+{\n+\treturn ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,\n+\t\t\t\t\t   elems_req, (void *)buf, buf_size,\n+\t\t\t\t\t   elems_cfgd, cd);\n+}\n+\n /**\n  * ice_aq_suspend_sched_elems - suspend scheduler elements\n  * @hw: pointer to the HW struct\n@@ -556,6 +577,149 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)\n \treturn 0;\n }\n \n+/**\n+ * ice_aq_rl_profile - performs a rate limiting task\n+ * @hw: pointer to the HW struct\n+ * @opcode:opcode for add, query, or remove profile(s)\n+ * @num_profiles: the number of profiles\n+ 
* @buf: pointer to buffer\n+ * @buf_size: buffer size in bytes\n+ * @num_processed: number of processed add or remove profile(s) to return\n+ * @cd: pointer to command details structure\n+ *\n+ * RL profile function to add, query, or remove profile(s)\n+ */\n+static enum ice_status\n+ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,\n+\t\t  u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,\n+\t\t  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_rl_profile *cmd;\n+\tstruct ice_aq_desc desc;\n+\tenum ice_status status;\n+\n+\tcmd = &desc.params.rl_profile;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, opcode);\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\tcmd->num_profiles = cpu_to_le16(num_profiles);\n+\tstatus = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);\n+\tif (!status && num_processed)\n+\t\t*num_processed = le16_to_cpu(cmd->num_processed);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_aq_add_rl_profile - adds rate limiting profile(s)\n+ * @hw: pointer to the HW struct\n+ * @num_profiles: the number of profile(s) to be add\n+ * @buf: pointer to buffer\n+ * @buf_size: buffer size in bytes\n+ * @num_profiles_added: total number of profiles added to return\n+ * @cd: pointer to command details structure\n+ *\n+ * Add RL profile (0x0410)\n+ */\n+static enum ice_status\n+ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,\n+\t\t      struct ice_aqc_rl_profile_generic_elem *buf,\n+\t\t      u16 buf_size, u16 *num_profiles_added,\n+\t\t      struct ice_sq_cd *cd)\n+{\n+\treturn ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,\n+\t\t\t\t num_profiles, buf,\n+\t\t\t\t buf_size, num_profiles_added, cd);\n+}\n+\n+/**\n+ * ice_aq_remove_rl_profile - removes RL profile(s)\n+ * @hw: pointer to the HW struct\n+ * @num_profiles: the number of profile(s) to remove\n+ * @buf: pointer to buffer\n+ * @buf_size: buffer size in bytes\n+ * @num_profiles_removed: total number of profiles removed to return\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Remove RL profile (0x0415)\n+ */\n+static enum ice_status\n+ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,\n+\t\t\t struct ice_aqc_rl_profile_generic_elem *buf,\n+\t\t\t u16 buf_size, u16 *num_profiles_removed,\n+\t\t\t struct ice_sq_cd *cd)\n+{\n+\treturn ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,\n+\t\t\t\t num_profiles, buf,\n+\t\t\t\t buf_size, num_profiles_removed, cd);\n+}\n+\n+/**\n+ * ice_sched_del_rl_profile - remove RL profile\n+ * @hw: pointer to the HW struct\n+ * @rl_info: rate limit profile information\n+ *\n+ * If the profile ID is not referenced anymore, it removes profile ID with\n+ * its associated parameters from HW DB,and locally. 
The caller needs to\n+ * hold scheduler lock.\n+ */\n+static enum ice_status\n+ice_sched_del_rl_profile(struct ice_hw *hw,\n+\t\t\t struct ice_aqc_rl_profile_info *rl_info)\n+{\n+\tstruct ice_aqc_rl_profile_generic_elem *buf;\n+\tu16 num_profiles_removed;\n+\tenum ice_status status;\n+\tu16 num_profiles = 1;\n+\n+\tif (rl_info->prof_id_ref != 0)\n+\t\treturn ICE_ERR_IN_USE;\n+\n+\t/* Safe to remove profile ID */\n+\tbuf = (struct ice_aqc_rl_profile_generic_elem *)\n+\t\t&rl_info->profile;\n+\tstatus = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),\n+\t\t\t\t\t  &num_profiles_removed, NULL);\n+\tif (status || num_profiles_removed != num_profiles)\n+\t\treturn ICE_ERR_CFG;\n+\n+\t/* Delete stale entry now */\n+\tlist_del(&rl_info->list_entry);\n+\tdevm_kfree(ice_hw_to_dev(hw), rl_info);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_clear_rl_prof - clears RL prof entries\n+ * @pi: port information structure\n+ *\n+ * This function removes all RL profile from HW as well as from SW DB.\n+ */\n+static void ice_sched_clear_rl_prof(struct ice_port_info *pi)\n+{\n+\tu16 ln;\n+\n+\tfor (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {\n+\t\tstruct ice_aqc_rl_profile_info *rl_prof_elem;\n+\t\tstruct ice_aqc_rl_profile_info *rl_prof_tmp;\n+\n+\t\tlist_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,\n+\t\t\t\t\t &pi->rl_prof_list[ln], list_entry) {\n+\t\t\tstruct ice_hw *hw = pi->hw;\n+\t\t\tenum ice_status status;\n+\n+\t\t\trl_prof_elem->prof_id_ref = 0;\n+\t\t\tstatus = ice_sched_del_rl_profile(hw, rl_prof_elem);\n+\t\t\tif (status) {\n+\t\t\t\tice_debug(hw, ICE_DBG_SCHED,\n+\t\t\t\t\t  \"Remove rl profile failed\\n\");\n+\t\t\t\t/* On error, free mem required */\n+\t\t\t\tlist_del(&rl_prof_elem->list_entry);\n+\t\t\t\tdevm_kfree(ice_hw_to_dev(hw), rl_prof_elem);\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n /**\n  * ice_sched_clear_agg - clears the aggregator related information\n  * @hw: pointer to the hardware structure\n@@ -592,6 +756,8 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)\n {\n \tif (!pi)\n \t\treturn;\n+\t/* remove RL profiles related lists */\n+\tice_sched_clear_rl_prof(pi);\n \tif (pi->root) {\n \t\tice_free_sched_node(pi, pi->root);\n \t\tpi->root = NULL;\n@@ -1014,6 +1180,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)\n \t/* initialize the port for handling the scheduler tree */\n \tpi->port_state = ICE_SCHED_PORT_STATE_READY;\n \tmutex_init(&pi->sched_lock);\n+\tfor (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)\n+\t\tINIT_LIST_HEAD(&pi->rl_prof_list[i]);\n \n err_init_port:\n \tif (status && pi->root) {\n@@ -1062,8 +1230,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)\n \t * and so on. This array will be populated from root (index 0) to\n \t * qgroup layer 7. Leaf node has no children.\n \t */\n-\tfor (i = 0; i < hw->num_tx_sched_layers; i++) {\n-\t\tmax_sibl = buf->layer_props[i].max_sibl_grp_sz;\n+\tfor (i = 0; i < hw->num_tx_sched_layers - 1; i++) {\n+\t\tmax_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;\n \t\thw->max_children[i] = le16_to_cpu(max_sibl);\n \t}\n \n@@ -1670,3 +1838,1095 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)\n {\n \treturn ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);\n }\n+\n+/**\n+ * ice_sched_rm_unused_rl_prof - remove unused RL profile\n+ * @pi: port information structure\n+ *\n+ * This function removes unused rate limit profiles from the HW and\n+ * SW DB. 
The caller needs to hold scheduler lock.\n+ */\n+static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)\n+{\n+\tu16 ln;\n+\n+\tfor (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {\n+\t\tstruct ice_aqc_rl_profile_info *rl_prof_elem;\n+\t\tstruct ice_aqc_rl_profile_info *rl_prof_tmp;\n+\n+\t\tlist_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,\n+\t\t\t\t\t &pi->rl_prof_list[ln], list_entry) {\n+\t\t\tif (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))\n+\t\t\t\tice_debug(pi->hw, ICE_DBG_SCHED,\n+\t\t\t\t\t  \"Removed rl profile\\n\");\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * ice_sched_update_elem - update element\n+ * @hw: pointer to the HW struct\n+ * @node: pointer to node\n+ * @info: node info to update\n+ *\n+ * It updates the HW DB, and local SW DB of node. It updates the scheduling\n+ * parameters of node from argument info data buffer (Info->data buf) and\n+ * returns success or error on config sched element failure. The caller\n+ * needs to hold scheduler lock.\n+ */\n+static enum ice_status\n+ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,\n+\t\t      struct ice_aqc_txsched_elem_data *info)\n+{\n+\tstruct ice_aqc_conf_elem buf;\n+\tenum ice_status status;\n+\tu16 elem_cfgd = 0;\n+\tu16 num_elems = 1;\n+\n+\tbuf.generic[0] = *info;\n+\t/* Parent TEID is reserved field in this aq call */\n+\tbuf.generic[0].parent_teid = 0;\n+\t/* Element type is reserved field in this aq call */\n+\tbuf.generic[0].data.elem_type = 0;\n+\t/* Flags is reserved field in this aq call */\n+\tbuf.generic[0].data.flags = 0;\n+\n+\t/* Update HW DB */\n+\t/* Configure element node */\n+\tstatus = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),\n+\t\t\t\t\t&elem_cfgd, NULL);\n+\tif (status || elem_cfgd != num_elems) {\n+\t\tice_debug(hw, ICE_DBG_SCHED, \"Config sched elem error\\n\");\n+\t\treturn ICE_ERR_CFG;\n+\t}\n+\n+\t/* Config success case */\n+\t/* Now update local SW DB */\n+\t/* Only copy the data portion of info buffer */\n+\tnode->info.data = info->data;\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params\n+ * @hw: pointer to the HW struct\n+ * @node: sched node to configure\n+ * @rl_type: rate limit type CIR, EIR, or shared\n+ * @bw_alloc: BW weight/allocation\n+ *\n+ * This function configures node element's BW allocation.\n+ */\n+static enum ice_status\n+ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,\n+\t\t\t    enum ice_rl_type rl_type, u8 bw_alloc)\n+{\n+\tstruct ice_aqc_txsched_elem_data buf;\n+\tstruct ice_aqc_txsched_elem *data;\n+\tenum ice_status status;\n+\n+\tbuf = node->info;\n+\tdata = &buf.data;\n+\tif (rl_type == ICE_MIN_BW) {\n+\t\tdata->valid_sections |= ICE_AQC_ELEM_VALID_CIR;\n+\t\tdata->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);\n+\t} else if (rl_type == ICE_MAX_BW) {\n+\t\tdata->valid_sections |= ICE_AQC_ELEM_VALID_EIR;\n+\t\tdata->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);\n+\t} else {\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\n+\t/* Configure element */\n+\tstatus = ice_sched_update_elem(hw, node, &buf);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_set_clear_cir_bw - set or clear CIR BW\n+ * @bw_t_info: bandwidth type information structure\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ *\n+ * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.\n+ */\n+static void\n+ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)\n+{\n+\tif (bw == ICE_SCHED_DFLT_BW) {\n+\t\tclear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->cir_bw.bw = 
0;\n+\t} else {\n+\t\t/* Save type of BW information */\n+\t\tset_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->cir_bw.bw = bw;\n+\t}\n+}\n+\n+/**\n+ * ice_set_clear_eir_bw - set or clear EIR BW\n+ * @bw_t_info: bandwidth type information structure\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ *\n+ * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.\n+ */\n+static void\n+ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)\n+{\n+\tif (bw == ICE_SCHED_DFLT_BW) {\n+\t\tclear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->eir_bw.bw = 0;\n+\t} else {\n+\t\t/* EIR BW and Shared BW profiles are mutually exclusive and\n+\t\t * hence only one of them may be set for any given element.\n+\t\t * First clear earlier saved shared BW information.\n+\t\t */\n+\t\tclear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->shared_bw = 0;\n+\t\t/* save EIR BW information */\n+\t\tset_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->eir_bw.bw = bw;\n+\t}\n+}\n+\n+/**\n+ * ice_set_clear_shared_bw - set or clear shared BW\n+ * @bw_t_info: bandwidth type information structure\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ *\n+ * Save or clear shared bandwidth (BW) in the passed param bw_t_info.\n+ */\n+static void\n+ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)\n+{\n+\tif (bw == ICE_SCHED_DFLT_BW) {\n+\t\tclear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->shared_bw = 0;\n+\t} else {\n+\t\t/* EIR BW and Shared BW profiles are mutually exclusive and\n+\t\t * hence only one of them may be set for any given element.\n+\t\t * First clear earlier saved EIR BW information.\n+\t\t */\n+\t\tclear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->eir_bw.bw = 0;\n+\t\t/* save shared BW information */\n+\t\tset_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);\n+\t\tbw_t_info->shared_bw = bw;\n+\t}\n+}\n+\n+/**\n+ * ice_sched_calc_wakeup - calculate RL profile wakeup parameter\n+ * @bw: bandwidth in Kbps\n+ *\n+ * This function calculates the wakeup parameter of RL profile.\n+ */\n+static u16 ice_sched_calc_wakeup(s32 bw)\n+{\n+\ts64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;\n+\ts32 wakeup_f_int;\n+\tu16 wakeup = 0;\n+\n+\t/* Get the wakeup integer value */\n+\tbytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);\n+\twakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);\n+\tif (wakeup_int > 63) {\n+\t\twakeup = (u16)((1 << 15) | wakeup_int);\n+\t} else {\n+\t\t/* Calculate fraction value up to 4 decimals\n+\t\t * Convert Integer value to a constant multiplier\n+\t\t */\n+\t\twakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;\n+\t\twakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *\n+\t\t\t\t\t   ICE_RL_PROF_FREQUENCY,\n+\t\t\t\t      bytes_per_sec);\n+\n+\t\t/* Get Fraction value */\n+\t\twakeup_f = wakeup_a - wakeup_b;\n+\n+\t\t/* Round up the Fractional value via Ceil(Fractional value) */\n+\t\tif (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))\n+\t\t\twakeup_f += 1;\n+\n+\t\twakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,\n+\t\t\t\t\t       ICE_RL_PROF_MULTIPLIER);\n+\t\twakeup |= (u16)(wakeup_int << 9);\n+\t\twakeup |= (u16)(0x1ff & wakeup_f_int);\n+\t}\n+\n+\treturn wakeup;\n+}\n+\n+/**\n+ * ice_sched_bw_to_rl_profile - convert BW to profile parameters\n+ * @bw: bandwidth in Kbps\n+ * @profile: profile parameters to return\n+ *\n+ * This function converts the BW to profile structure format.\n+ */\n+static enum 
ice_status\n+ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)\n+{\n+\tenum ice_status status = ICE_ERR_PARAM;\n+\ts64 bytes_per_sec, ts_rate, mv_tmp;\n+\tbool found = false;\n+\ts32 encode = 0;\n+\ts64 mv = 0;\n+\ts32 i;\n+\n+\t/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */\n+\tif (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)\n+\t\treturn status;\n+\n+\t/* Bytes per second from Kbps */\n+\tbytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);\n+\n+\t/* encode is 6 bits but really useful are 5 bits */\n+\tfor (i = 0; i < 64; i++) {\n+\t\tu64 pow_result = BIT_ULL(i);\n+\n+\t\tts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,\n+\t\t\t\t     pow_result * ICE_RL_PROF_TS_MULTIPLIER);\n+\t\tif (ts_rate <= 0)\n+\t\t\tcontinue;\n+\n+\t\t/* Multiplier value */\n+\t\tmv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,\n+\t\t\t\t    ts_rate);\n+\n+\t\t/* Round to the nearest ICE_RL_PROF_MULTIPLIER */\n+\t\tmv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);\n+\n+\t\t/* First multiplier value greater than the given\n+\t\t * accuracy bytes\n+\t\t */\n+\t\tif (mv > ICE_RL_PROF_ACCURACY_BYTES) {\n+\t\t\tencode = i;\n+\t\t\tfound = true;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\tif (found) {\n+\t\tu16 wm;\n+\n+\t\twm = ice_sched_calc_wakeup(bw);\n+\t\tprofile->rl_multiply = cpu_to_le16(mv);\n+\t\tprofile->wake_up_calc = cpu_to_le16(wm);\n+\t\tprofile->rl_encode = cpu_to_le16(encode);\n+\t\tstatus = 0;\n+\t} else {\n+\t\tstatus = ICE_ERR_DOES_NOT_EXIST;\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_add_rl_profile - add RL profile\n+ * @pi: port information structure\n+ * @rl_type: type of rate limit BW - min, max, or shared\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ * @layer_num: specifies in which layer to create profile\n+ *\n+ * This function first checks the existing list for corresponding BW\n+ * parameter. If it exists, it returns the associated profile otherwise\n+ * it creates a new rate limit profile for requested BW, and adds it to\n+ * the HW DB and local list. 
It returns the new profile or null on error.\n+ * The caller needs to hold the scheduler lock.\n+ */\n+static struct ice_aqc_rl_profile_info *\n+ice_sched_add_rl_profile(struct ice_port_info *pi,\n+\t\t\t enum ice_rl_type rl_type, u32 bw, u8 layer_num)\n+{\n+\tstruct ice_aqc_rl_profile_generic_elem *buf;\n+\tstruct ice_aqc_rl_profile_info *rl_prof_elem;\n+\tu16 profiles_added = 0, num_profiles = 1;\n+\tenum ice_status status;\n+\tstruct ice_hw *hw;\n+\tu8 profile_type;\n+\n+\tif (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)\n+\t\treturn NULL;\n+\tswitch (rl_type) {\n+\tcase ICE_MIN_BW:\n+\t\tprofile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;\n+\t\tbreak;\n+\tcase ICE_MAX_BW:\n+\t\tprofile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;\n+\t\tbreak;\n+\tcase ICE_SHARED_BW:\n+\t\tprofile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn NULL;\n+\t}\n+\n+\tif (!pi)\n+\t\treturn NULL;\n+\thw = pi->hw;\n+\tlist_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],\n+\t\t\t    list_entry)\n+\t\tif (rl_prof_elem->profile.flags == profile_type &&\n+\t\t    rl_prof_elem->bw == bw)\n+\t\t\t/* Return existing profile ID info */\n+\t\t\treturn rl_prof_elem;\n+\n+\t/* Create new profile ID */\n+\trl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),\n+\t\t\t\t    GFP_KERNEL);\n+\n+\tif (!rl_prof_elem)\n+\t\treturn NULL;\n+\n+\tstatus = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);\n+\tif (status)\n+\t\tgoto exit_add_rl_prof;\n+\n+\trl_prof_elem->bw = bw;\n+\t/* layer_num is zero relative, and fw expects level from 1 to 9 */\n+\trl_prof_elem->profile.level = layer_num + 1;\n+\trl_prof_elem->profile.flags = profile_type;\n+\trl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);\n+\n+\t/* Create new entry in HW DB */\n+\tbuf = (struct ice_aqc_rl_profile_generic_elem *)\n+\t\t&rl_prof_elem->profile;\n+\tstatus = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),\n+\t\t\t\t       &profiles_added, NULL);\n+\tif (status || profiles_added != num_profiles)\n+\t\tgoto exit_add_rl_prof;\n+\n+\t/* Good entry - add in the list */\n+\trl_prof_elem->prof_id_ref = 0;\n+\tlist_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);\n+\treturn rl_prof_elem;\n+\n+exit_add_rl_prof:\n+\tdevm_kfree(ice_hw_to_dev(hw), rl_prof_elem);\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_sched_cfg_node_bw_lmt - configure node sched params\n+ * @hw: pointer to the HW struct\n+ * @node: sched node to configure\n+ * @rl_type: rate limit type CIR, EIR, or shared\n+ * @rl_prof_id: rate limit profile ID\n+ *\n+ * This function configures node element's BW limit.\n+ */\n+static enum ice_status\n+ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,\n+\t\t\t  enum ice_rl_type rl_type, u16 rl_prof_id)\n+{\n+\tstruct ice_aqc_txsched_elem_data buf;\n+\tstruct ice_aqc_txsched_elem *data;\n+\n+\tbuf = node->info;\n+\tdata = &buf.data;\n+\tswitch (rl_type) {\n+\tcase ICE_MIN_BW:\n+\t\tdata->valid_sections |= ICE_AQC_ELEM_VALID_CIR;\n+\t\tdata->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);\n+\t\tbreak;\n+\tcase ICE_MAX_BW:\n+\t\t/* EIR BW and Shared BW profiles are mutually exclusive and\n+\t\t * hence only one of them may be set for any given element\n+\t\t */\n+\t\tif (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)\n+\t\t\treturn ICE_ERR_CFG;\n+\t\tdata->valid_sections |= ICE_AQC_ELEM_VALID_EIR;\n+\t\tdata->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);\n+\t\tbreak;\n+\tcase ICE_SHARED_BW:\n+\t\t/* Check for removing shared BW */\n+\t\tif (rl_prof_id == 
ICE_SCHED_NO_SHARED_RL_PROF_ID) {\n+\t\t\t/* remove shared profile */\n+\t\t\tdata->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;\n+\t\t\tdata->srl_id = 0; /* clear SRL field */\n+\n+\t\t\t/* enable back EIR to default profile */\n+\t\t\tdata->valid_sections |= ICE_AQC_ELEM_VALID_EIR;\n+\t\t\tdata->eir_bw.bw_profile_idx =\n+\t\t\t\tcpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* EIR BW and Shared BW profiles are mutually exclusive and\n+\t\t * hence only one of them may be set for any given element\n+\t\t */\n+\t\tif ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&\n+\t\t    (le16_to_cpu(data->eir_bw.bw_profile_idx) !=\n+\t\t\t    ICE_SCHED_DFLT_RL_PROF_ID))\n+\t\t\treturn ICE_ERR_CFG;\n+\t\t/* EIR BW is set to default, disable it */\n+\t\tdata->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;\n+\t\t/* Okay to enable shared BW now */\n+\t\tdata->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;\n+\t\tdata->srl_id = cpu_to_le16(rl_prof_id);\n+\t\tbreak;\n+\tdefault:\n+\t\t/* Unknown rate limit type */\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\n+\t/* Configure element */\n+\treturn ice_sched_update_elem(hw, node, &buf);\n+}\n+\n+/**\n+ * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID\n+ * @node: sched node\n+ * @rl_type: rate limit type\n+ *\n+ * If existing profile matches, it returns the corresponding rate\n+ * limit profile ID, otherwise it returns an invalid ID as error.\n+ */\n+static u16\n+ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,\n+\t\t\t      enum ice_rl_type rl_type)\n+{\n+\tu16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;\n+\tstruct ice_aqc_txsched_elem *data;\n+\n+\tdata = &node->info.data;\n+\tswitch (rl_type) {\n+\tcase ICE_MIN_BW:\n+\t\tif (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)\n+\t\t\trl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);\n+\t\tbreak;\n+\tcase ICE_MAX_BW:\n+\t\tif (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)\n+\t\t\trl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);\n+\t\tbreak;\n+\tcase ICE_SHARED_BW:\n+\t\tif (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)\n+\t\t\trl_prof_id = le16_to_cpu(data->srl_id);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn rl_prof_id;\n+}\n+\n+/**\n+ * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer\n+ * @pi: port information structure\n+ * @rl_type: type of rate limit BW - min, max, or shared\n+ * @layer_index: layer index\n+ *\n+ * This function returns requested profile creation layer.\n+ */\n+static u8\n+ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,\n+\t\t\t    u8 layer_index)\n+{\n+\tstruct ice_hw *hw = pi->hw;\n+\n+\tif (layer_index >= hw->num_tx_sched_layers)\n+\t\treturn ICE_SCHED_INVAL_LAYER_NUM;\n+\tswitch (rl_type) {\n+\tcase ICE_MIN_BW:\n+\t\tif (hw->layer_info[layer_index].max_cir_rl_profiles)\n+\t\t\treturn layer_index;\n+\t\tbreak;\n+\tcase ICE_MAX_BW:\n+\t\tif (hw->layer_info[layer_index].max_eir_rl_profiles)\n+\t\t\treturn layer_index;\n+\t\tbreak;\n+\tcase ICE_SHARED_BW:\n+\t\t/* if current layer doesn't support SRL profile creation\n+\t\t * then try a layer up or down.\n+\t\t */\n+\t\tif (hw->layer_info[layer_index].max_srl_profiles)\n+\t\t\treturn layer_index;\n+\t\telse if (layer_index < hw->num_tx_sched_layers - 1 &&\n+\t\t\t hw->layer_info[layer_index + 1].max_srl_profiles)\n+\t\t\treturn layer_index + 1;\n+\t\telse if (layer_index > 0 &&\n+\t\t\t hw->layer_info[layer_index - 1].max_srl_profiles)\n+\t\t\treturn layer_index - 1;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn 
ICE_SCHED_INVAL_LAYER_NUM;\n+}\n+\n+/**\n+ * ice_sched_get_srl_node - get shared rate limit node\n+ * @node: tree node\n+ * @srl_layer: shared rate limit layer\n+ *\n+ * This function returns SRL node to be used for shared rate limit purpose.\n+ * The caller needs to hold scheduler lock.\n+ */\n+static struct ice_sched_node *\n+ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)\n+{\n+\tif (srl_layer > node->tx_sched_layer)\n+\t\treturn node->children[0];\n+\telse if (srl_layer < node->tx_sched_layer)\n+\t\t/* Node can't be created without a parent. It will always\n+\t\t * have a valid parent except root node.\n+\t\t */\n+\t\treturn node->parent;\n+\telse\n+\t\treturn node;\n+}\n+\n+/**\n+ * ice_sched_rm_rl_profile - remove RL profile ID\n+ * @pi: port information structure\n+ * @layer_num: layer number where profiles are saved\n+ * @profile_type: profile type like EIR, CIR, or SRL\n+ * @profile_id: profile ID to remove\n+ *\n+ * This function removes rate limit profile from layer 'layer_num' of type\n+ * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold\n+ * scheduler lock.\n+ */\n+static enum ice_status\n+ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,\n+\t\t\tu16 profile_id)\n+{\n+\tstruct ice_aqc_rl_profile_info *rl_prof_elem;\n+\tenum ice_status status = 0;\n+\n+\tif (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)\n+\t\treturn ICE_ERR_PARAM;\n+\t/* Check the existing list for RL profile */\n+\tlist_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],\n+\t\t\t    list_entry)\n+\t\tif (rl_prof_elem->profile.flags == profile_type &&\n+\t\t    le16_to_cpu(rl_prof_elem->profile.profile_id) ==\n+\t\t    profile_id) {\n+\t\t\tif (rl_prof_elem->prof_id_ref)\n+\t\t\t\trl_prof_elem->prof_id_ref--;\n+\n+\t\t\t/* Remove old profile ID from database */\n+\t\t\tstatus = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);\n+\t\t\tif (status && status != ICE_ERR_IN_USE)\n+\t\t\t\tice_debug(pi->hw, ICE_DBG_SCHED,\n+\t\t\t\t\t  \"Remove rl profile failed\\n\");\n+\t\t\tbreak;\n+\t\t}\n+\tif (status == ICE_ERR_IN_USE)\n+\t\tstatus = 0;\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default\n+ * @pi: port information structure\n+ * @node: pointer to node structure\n+ * @rl_type: rate limit type min, max, or shared\n+ * @layer_num: layer number where RL profiles are saved\n+ *\n+ * This function configures node element's BW rate limit profile ID of\n+ * type CIR, EIR, or SRL to default. 
This function needs to be called\n+ * with the scheduler lock held.\n+ */\n+static enum ice_status\n+ice_sched_set_node_bw_dflt(struct ice_port_info *pi,\n+\t\t\t   struct ice_sched_node *node,\n+\t\t\t   enum ice_rl_type rl_type, u8 layer_num)\n+{\n+\tenum ice_status status;\n+\tstruct ice_hw *hw;\n+\tu8 profile_type;\n+\tu16 rl_prof_id;\n+\tu16 old_id;\n+\n+\thw = pi->hw;\n+\tswitch (rl_type) {\n+\tcase ICE_MIN_BW:\n+\t\tprofile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;\n+\t\trl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;\n+\t\tbreak;\n+\tcase ICE_MAX_BW:\n+\t\tprofile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;\n+\t\trl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;\n+\t\tbreak;\n+\tcase ICE_SHARED_BW:\n+\t\tprofile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;\n+\t\t/* No SRL is configured for default case */\n+\t\trl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\t/* Save existing RL prof ID for later clean up */\n+\told_id = ice_sched_get_node_rl_prof_id(node, rl_type);\n+\t/* Configure BW scheduling parameters */\n+\tstatus = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);\n+\tif (status)\n+\t\treturn status;\n+\n+\t/* Remove stale RL profile ID */\n+\tif (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||\n+\t    old_id == ICE_SCHED_INVAL_PROF_ID)\n+\t\treturn 0;\n+\n+\treturn ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);\n+}\n+\n+/**\n+ * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness\n+ * @pi: port information structure\n+ * @node: pointer to node structure\n+ * @layer_num: layer number where rate limit profiles are saved\n+ * @rl_type: rate limit type min, max, or shared\n+ * @bw: bandwidth value\n+ *\n+ * This function prepares node element's bandwidth to SRL or EIR exclusively.\n+ * EIR BW and Shared BW profiles are mutually exclusive and hence only one of\n+ * them may be set for any given element. This function needs to be called\n+ * with the scheduler lock held.\n+ */\n+static enum ice_status\n+ice_sched_set_eir_srl_excl(struct ice_port_info *pi,\n+\t\t\t   struct ice_sched_node *node,\n+\t\t\t   u8 layer_num, enum ice_rl_type rl_type, u32 bw)\n+{\n+\tif (rl_type == ICE_SHARED_BW) {\n+\t\t/* SRL node passed in this case, it may be different node */\n+\t\tif (bw == ICE_SCHED_DFLT_BW)\n+\t\t\t/* SRL being removed, ice_sched_cfg_node_bw_lmt()\n+\t\t\t * enables EIR to default. EIR is not set in this\n+\t\t\t * case, so no additional action is required.\n+\t\t\t */\n+\t\t\treturn 0;\n+\n+\t\t/* SRL being configured, set EIR to default here.\n+\t\t * ice_sched_cfg_node_bw_lmt() disables EIR when it\n+\t\t * configures SRL\n+\t\t */\n+\t\treturn ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,\n+\t\t\t\t\t\t  layer_num);\n+\t} else if (rl_type == ICE_MAX_BW &&\n+\t\t   node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {\n+\t\t/* Remove Shared profile. Set default shared BW call\n+\t\t * removes shared profile for a node.\n+\t\t */\n+\t\treturn ice_sched_set_node_bw_dflt(pi, node,\n+\t\t\t\t\t\t  ICE_SHARED_BW,\n+\t\t\t\t\t\t  layer_num);\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_sched_set_node_bw - set node's bandwidth\n+ * @pi: port information structure\n+ * @node: tree node\n+ * @rl_type: rate limit type min, max, or shared\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ * @layer_num: layer number\n+ *\n+ * This function adds new profile corresponding to requested BW, configures\n+ * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile\n+ * ID from local database. 
The caller needs to hold scheduler lock.\n+ */\n+static enum ice_status\n+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,\n+\t\t      enum ice_rl_type rl_type, u32 bw, u8 layer_num)\n+{\n+\tstruct ice_aqc_rl_profile_info *rl_prof_info;\n+\tenum ice_status status = ICE_ERR_PARAM;\n+\tstruct ice_hw *hw = pi->hw;\n+\tu16 old_id, rl_prof_id;\n+\n+\trl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);\n+\tif (!rl_prof_info)\n+\t\treturn status;\n+\n+\trl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);\n+\n+\t/* Save existing RL prof ID for later clean up */\n+\told_id = ice_sched_get_node_rl_prof_id(node, rl_type);\n+\t/* Configure BW scheduling parameters */\n+\tstatus = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);\n+\tif (status)\n+\t\treturn status;\n+\n+\t/* New changes has been applied */\n+\t/* Increment the profile ID reference count */\n+\trl_prof_info->prof_id_ref++;\n+\n+\t/* Check for old ID removal */\n+\tif ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||\n+\t    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)\n+\t\treturn 0;\n+\n+\treturn ice_sched_rm_rl_profile(pi, layer_num,\n+\t\t\t\t       rl_prof_info->profile.flags,\n+\t\t\t\t       old_id);\n+}\n+\n+/**\n+ * ice_sched_set_node_bw_lmt - set node's BW limit\n+ * @pi: port information structure\n+ * @node: tree node\n+ * @rl_type: rate limit type min, max, or shared\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ *\n+ * It updates node's BW limit parameters like BW RL profile ID of type CIR,\n+ * EIR, or SRL. The caller needs to hold scheduler lock.\n+ */\n+static enum ice_status\n+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,\n+\t\t\t  enum ice_rl_type rl_type, u32 bw)\n+{\n+\tstruct ice_sched_node *cfg_node = node;\n+\tenum ice_status status;\n+\n+\tstruct ice_hw *hw;\n+\tu8 layer_num;\n+\n+\tif (!pi)\n+\t\treturn ICE_ERR_PARAM;\n+\thw = pi->hw;\n+\t/* Remove unused RL profile IDs from HW and SW DB */\n+\tice_sched_rm_unused_rl_prof(pi);\n+\tlayer_num = ice_sched_get_rl_prof_layer(pi, rl_type,\n+\t\t\t\t\t\tnode->tx_sched_layer);\n+\tif (layer_num >= hw->num_tx_sched_layers)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tif (rl_type == ICE_SHARED_BW) {\n+\t\t/* SRL node may be different */\n+\t\tcfg_node = ice_sched_get_srl_node(node, layer_num);\n+\t\tif (!cfg_node)\n+\t\t\treturn ICE_ERR_CFG;\n+\t}\n+\t/* EIR BW and Shared BW profiles are mutually exclusive and\n+\t * hence only one of them may be set for any given element\n+\t */\n+\tstatus = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,\n+\t\t\t\t\t    bw);\n+\tif (status)\n+\t\treturn status;\n+\tif (bw == ICE_SCHED_DFLT_BW)\n+\t\treturn ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,\n+\t\t\t\t\t\t  layer_num);\n+\treturn ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);\n+}\n+\n+/**\n+ * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default\n+ * @pi: port information structure\n+ * @node: pointer to node structure\n+ * @rl_type: rate limit type min, max, or shared\n+ *\n+ * This function configures node element's BW rate limit profile ID of\n+ * type CIR, EIR, or SRL to default. 
This function needs to be called\n+ * with the scheduler lock held.\n+ */\n+static enum ice_status\n+ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,\n+\t\t\t       struct ice_sched_node *node,\n+\t\t\t       enum ice_rl_type rl_type)\n+{\n+\treturn ice_sched_set_node_bw_lmt(pi, node, rl_type,\n+\t\t\t\t\t ICE_SCHED_DFLT_BW);\n+}\n+\n+/**\n+ * ice_sched_validate_srl_node - Check node for SRL applicability\n+ * @node: sched node to configure\n+ * @sel_layer: selected SRL layer\n+ *\n+ * This function checks if the SRL can be applied to a selceted layer node on\n+ * behalf of the requested node (first argument). This function needs to be\n+ * called with scheduler lock held.\n+ */\n+static enum ice_status\n+ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)\n+{\n+\t/* SRL profiles are not available on all layers. Check if the\n+\t * SRL profile can be applied to a node above or below the\n+\t * requested node. SRL configuration is possible only if the\n+\t * selected layer's node has single child.\n+\t */\n+\tif (sel_layer == node->tx_sched_layer ||\n+\t    ((sel_layer == node->tx_sched_layer + 1) &&\n+\t    node->num_children == 1) ||\n+\t    ((sel_layer == node->tx_sched_layer - 1) &&\n+\t    (node->parent && node->parent->num_children == 1)))\n+\t\treturn 0;\n+\n+\treturn ICE_ERR_CFG;\n+}\n+\n+/**\n+ * ice_sched_save_q_bw - save queue node's BW information\n+ * @q_ctx: queue context structure\n+ * @rl_type: rate limit type min, max, or shared\n+ * @bw: bandwidth in Kbps - Kilo bits per sec\n+ *\n+ * Save BW information of queue type node for post replay use.\n+ */\n+static enum ice_status\n+ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)\n+{\n+\tswitch (rl_type) {\n+\tcase ICE_MIN_BW:\n+\t\tice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);\n+\t\tbreak;\n+\tcase ICE_MAX_BW:\n+\t\tice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);\n+\t\tbreak;\n+\tcase ICE_SHARED_BW:\n+\t\tice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_sched_set_q_bw_lmt - sets queue BW limit\n+ * @pi: port information structure\n+ * @vsi_handle: sw VSI handle\n+ * @tc: traffic class\n+ * @q_handle: software queue handle\n+ * @rl_type: min, max, or shared\n+ * @bw: bandwidth in Kbps\n+ *\n+ * This function sets BW limit of queue scheduling node.\n+ */\n+static enum ice_status\n+ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t       u16 q_handle, enum ice_rl_type rl_type, u32 bw)\n+{\n+\tenum ice_status status = ICE_ERR_PARAM;\n+\tstruct ice_sched_node *node;\n+\tstruct ice_q_ctx *q_ctx;\n+\n+\tif (!ice_is_vsi_valid(pi->hw, vsi_handle))\n+\t\treturn ICE_ERR_PARAM;\n+\tmutex_lock(&pi->sched_lock);\n+\tq_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);\n+\tif (!q_ctx)\n+\t\tgoto exit_q_bw_lmt;\n+\tnode = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);\n+\tif (!node) {\n+\t\tice_debug(pi->hw, ICE_DBG_SCHED, \"Wrong q_teid\\n\");\n+\t\tgoto exit_q_bw_lmt;\n+\t}\n+\n+\t/* Return error if it is not a leaf node */\n+\tif (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)\n+\t\tgoto exit_q_bw_lmt;\n+\n+\t/* SRL bandwidth layer selection */\n+\tif (rl_type == ICE_SHARED_BW) {\n+\t\tu8 sel_layer; /* selected layer */\n+\n+\t\tsel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,\n+\t\t\t\t\t\t\tnode->tx_sched_layer);\n+\t\tif (sel_layer >= pi->hw->num_tx_sched_layers) {\n+\t\t\tstatus = ICE_ERR_PARAM;\n+\t\t\tgoto 
exit_q_bw_lmt;\n+\t\t}\n+\t\tstatus = ice_sched_validate_srl_node(node, sel_layer);\n+\t\tif (status)\n+\t\t\tgoto exit_q_bw_lmt;\n+\t}\n+\n+\tif (bw == ICE_SCHED_DFLT_BW)\n+\t\tstatus = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);\n+\telse\n+\t\tstatus = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);\n+\n+\tif (!status)\n+\t\tstatus = ice_sched_save_q_bw(q_ctx, rl_type, bw);\n+\n+exit_q_bw_lmt:\n+\tmutex_unlock(&pi->sched_lock);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_cfg_q_bw_lmt - configure queue BW limit\n+ * @pi: port information structure\n+ * @vsi_handle: sw VSI handle\n+ * @tc: traffic class\n+ * @q_handle: software queue handle\n+ * @rl_type: min, max, or shared\n+ * @bw: bandwidth in Kbps\n+ *\n+ * This function configures BW limit of queue scheduling node.\n+ */\n+enum ice_status\n+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t u16 q_handle, enum ice_rl_type rl_type, u32 bw)\n+{\n+\treturn ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,\n+\t\t\t\t      bw);\n+}\n+\n+/**\n+ * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit\n+ * @pi: port information structure\n+ * @vsi_handle: sw VSI handle\n+ * @tc: traffic class\n+ * @q_handle: software queue handle\n+ * @rl_type: min, max, or shared\n+ *\n+ * This function configures BW default limit of queue scheduling node.\n+ */\n+enum ice_status\n+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t      u16 q_handle, enum ice_rl_type rl_type)\n+{\n+\treturn ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,\n+\t\t\t\t      ICE_SCHED_DFLT_BW);\n+}\n+\n+/**\n+ * ice_cfg_rl_burst_size - Set burst size value\n+ * @hw: pointer to the HW struct\n+ * @bytes: burst size in bytes\n+ *\n+ * This function configures/set the burst size to requested new value. The new\n+ * burst size value is used for future rate limit calls. It doesn't change the\n+ * existing or previously created RL profiles.\n+ */\n+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)\n+{\n+\tu16 burst_size_to_prog;\n+\n+\tif (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||\n+\t    bytes > ICE_MAX_BURST_SIZE_ALLOWED)\n+\t\treturn ICE_ERR_PARAM;\n+\tif (ice_round_to_num(bytes, 64) <=\n+\t    ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {\n+\t\t/* 64 byte granularity case */\n+\t\t/* Disable MSB granularity bit */\n+\t\tburst_size_to_prog = ICE_64_BYTE_GRANULARITY;\n+\t\t/* round number to nearest 64 byte granularity */\n+\t\tbytes = ice_round_to_num(bytes, 64);\n+\t\t/* The value is in 64 byte chunks */\n+\t\tburst_size_to_prog |= (u16)(bytes / 64);\n+\t} else {\n+\t\t/* k bytes granularity case */\n+\t\t/* Enable MSB granularity bit */\n+\t\tburst_size_to_prog = ICE_KBYTE_GRANULARITY;\n+\t\t/* round number to nearest 1024 granularity */\n+\t\tbytes = ice_round_to_num(bytes, 1024);\n+\t\t/* check rounding doesn't go beyond allowed */\n+\t\tif (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)\n+\t\t\tbytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;\n+\t\t/* The value is in k bytes */\n+\t\tburst_size_to_prog |= (u16)(bytes / 1024);\n+\t}\n+\thw->max_burst_size = burst_size_to_prog;\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_sched_replay_node_prio - re-configure node priority\n+ * @hw: pointer to the HW struct\n+ * @node: sched node to configure\n+ * @priority: priority value\n+ *\n+ * This function configures node element's priority value. 
It\n+ * needs to be called with scheduler lock held.\n+ */\n+static enum ice_status\n+ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,\n+\t\t\t   u8 priority)\n+{\n+\tstruct ice_aqc_txsched_elem_data buf;\n+\tstruct ice_aqc_txsched_elem *data;\n+\tenum ice_status status;\n+\n+\tbuf = node->info;\n+\tdata = &buf.data;\n+\tdata->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;\n+\tdata->generic = priority;\n+\n+\t/* Configure element */\n+\tstatus = ice_sched_update_elem(hw, node, &buf);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_replay_node_bw - replay node(s) BW\n+ * @hw: pointer to the HW struct\n+ * @node: sched node to configure\n+ * @bw_t_info: BW type information\n+ *\n+ * This function restores node's BW from bw_t_info. The caller needs\n+ * to hold the scheduler lock.\n+ */\n+static enum ice_status\n+ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,\n+\t\t\t struct ice_bw_type_info *bw_t_info)\n+{\n+\tstruct ice_port_info *pi = hw->port_info;\n+\tenum ice_status status = ICE_ERR_PARAM;\n+\tu16 bw_alloc;\n+\n+\tif (!node)\n+\t\treturn status;\n+\tif (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))\n+\t\treturn 0;\n+\tif (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {\n+\t\tstatus = ice_sched_replay_node_prio(hw, node,\n+\t\t\t\t\t\t    bw_t_info->generic);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n+\tif (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {\n+\t\tstatus = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,\n+\t\t\t\t\t\t   bw_t_info->cir_bw.bw);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n+\tif (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {\n+\t\tbw_alloc = bw_t_info->cir_bw.bw_alloc;\n+\t\tstatus = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,\n+\t\t\t\t\t\t     bw_alloc);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n+\tif (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {\n+\t\tstatus = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,\n+\t\t\t\t\t\t   bw_t_info->eir_bw.bw);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n+\tif (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {\n+\t\tbw_alloc = bw_t_info->eir_bw.bw_alloc;\n+\t\tstatus = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,\n+\t\t\t\t\t\t     bw_alloc);\n+\t\tif (status)\n+\t\t\treturn status;\n+\t}\n+\tif (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))\n+\t\tstatus = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,\n+\t\t\t\t\t\t   bw_t_info->shared_bw);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_sched_replay_q_bw - replay queue type node BW\n+ * @pi: port information structure\n+ * @q_ctx: queue context structure\n+ *\n+ * This function replays queue type node bandwidth. 
This function needs to be\n+ * called with scheduler lock held.\n+ */\n+enum ice_status\n+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)\n+{\n+\tstruct ice_sched_node *q_node;\n+\n+\t/* Following also checks the presence of node in tree */\n+\tq_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);\n+\tif (!q_node)\n+\t\treturn ICE_ERR_PARAM;\n+\treturn ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h\nindex 3902a8ad3025..f0593cfb6521 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.h\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.h\n@@ -8,6 +8,36 @@\n \n #define ICE_QGRP_LAYER_OFFSET\t2\n #define ICE_VSI_LAYER_OFFSET\t4\n+#define ICE_SCHED_INVAL_LAYER_NUM\t0xFF\n+/* Burst size is a 12 bits register that is configured while creating the RL\n+ * profile(s). MSB is a granularity bit and tells the granularity type\n+ * 0 - LSB bits are in 64 bytes granularity\n+ * 1 - LSB bits are in 1K bytes granularity\n+ */\n+#define ICE_64_BYTE_GRANULARITY\t\t\t0\n+#define ICE_KBYTE_GRANULARITY\t\t\tBIT(11)\n+#define ICE_MIN_BURST_SIZE_ALLOWED\t\t64 /* In Bytes */\n+#define ICE_MAX_BURST_SIZE_ALLOWED \\\n+\t((BIT(11) - 1) * 1024) /* In Bytes */\n+#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \\\n+\t((BIT(11) - 1) * 64) /* In Bytes */\n+#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY\tICE_MAX_BURST_SIZE_ALLOWED\n+\n+#define ICE_RL_PROF_FREQUENCY 446000000\n+#define ICE_RL_PROF_ACCURACY_BYTES 128\n+#define ICE_RL_PROF_MULTIPLIER 10000\n+#define ICE_RL_PROF_TS_MULTIPLIER 32\n+#define ICE_RL_PROF_FRACTION 512\n+\n+/* BW rate limit profile parameters list entry along\n+ * with bandwidth maintained per layer in port info\n+ */\n+struct ice_aqc_rl_profile_info {\n+\tstruct ice_aqc_rl_profile_elem profile;\n+\tstruct list_head list_entry;\n+\tu32 bw;\t\t\t/* requested */\n+\tu16 prof_id_ref;\t/* profile ID to node association ref count */\n+};\n \n struct ice_sched_agg_vsi_info {\n \tstruct list_head list_entry;\n@@ -48,4 +78,13 @@ enum ice_status\n ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,\n \t\t  u8 owner, bool enable);\n enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);\n+enum ice_status\n+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t u16 q_handle, enum ice_rl_type rl_type, u32 bw);\n+enum ice_status\n+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,\n+\t\t      u16 q_handle, enum ice_rl_type rl_type);\n+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);\n+enum ice_status\n+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);\n #endif /* _ICE_SCHED_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h\nindex cb123fbe30be..fa14b9545dab 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.h\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.h\n@@ -14,11 +14,6 @@\n #define ICE_VSI_INVAL_ID 0xffff\n #define ICE_INVAL_Q_HANDLE 0xFFFF\n \n-/* VSI queue context structure */\n-struct ice_q_ctx {\n-\tu16  q_handle;\n-};\n-\n /* VSI context structure for add/get/update/free operations */\n struct ice_vsi_ctx {\n \tu16 vsi_num;\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 08fe3e5e72d4..d3d7049c97f0 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ 
b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -19,6 +19,17 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)\n \treturn test_bit(tc, &bitmap);\n }\n \n+static inline u64 round_up_64bit(u64 a, u32 b)\n+{\n+\treturn div64_long(((a) + (b) / 2), (b));\n+}\n+\n+static inline u32 ice_round_to_num(u32 N, u32 R)\n+{\n+\treturn ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) :\n+\t\t((((N) + (R) - 1) / (R)) * (R)));\n+}\n+\n /* Driver always calls main vsi_handle first */\n #define ICE_MAIN_VSI_HANDLE\t\t0\n \n@@ -272,10 +283,56 @@ enum ice_agg_type {\n \tICE_AGG_TYPE_QG\n };\n \n+/* Rate limit types */\n+enum ice_rl_type {\n+\tICE_UNKNOWN_BW = 0,\n+\tICE_MIN_BW,\t\t/* for CIR profile */\n+\tICE_MAX_BW,\t\t/* for EIR profile */\n+\tICE_SHARED_BW\t\t/* for shared profile */\n+};\n+\n+#define ICE_SCHED_MIN_BW\t\t500\t\t/* in Kbps */\n+#define ICE_SCHED_MAX_BW\t\t100000000\t/* in Kbps */\n+#define ICE_SCHED_DFLT_BW\t\t0xFFFFFFFF\t/* unlimited */\n #define ICE_SCHED_DFLT_RL_PROF_ID\t0\n+#define ICE_SCHED_NO_SHARED_RL_PROF_ID\t0xFFFF\n #define ICE_SCHED_DFLT_BW_WT\t\t1\n+#define ICE_SCHED_INVAL_PROF_ID\t\t0xFFFF\n+#define ICE_SCHED_DFLT_BURST_SIZE\t(15 * 1024)\t/* in bytes (15k) */\n \n-/* VSI type list entry to locate corresponding VSI/ag nodes */\n+ /* Data structure for saving BW information */\n+enum ice_bw_type {\n+\tICE_BW_TYPE_PRIO,\n+\tICE_BW_TYPE_CIR,\n+\tICE_BW_TYPE_CIR_WT,\n+\tICE_BW_TYPE_EIR,\n+\tICE_BW_TYPE_EIR_WT,\n+\tICE_BW_TYPE_SHARED,\n+\tICE_BW_TYPE_CNT\t\t/* This must be last */\n+};\n+\n+struct ice_bw {\n+\tu32 bw;\n+\tu16 bw_alloc;\n+};\n+\n+struct ice_bw_type_info {\n+\tDECLARE_BITMAP(bw_t_bitmap, ICE_BW_TYPE_CNT);\n+\tu8 generic;\n+\tstruct ice_bw cir_bw;\n+\tstruct ice_bw eir_bw;\n+\tu32 shared_bw;\n+};\n+\n+/* VSI queue context structure for given TC */\n+struct ice_q_ctx {\n+\tu16  q_handle;\n+\tu32  q_teid;\n+\t/* bw_t_info saves queue BW information */\n+\tstruct ice_bw_type_info bw_t_info;\n+};\n+\n+/* VSI type list entry to locate corresponding VSI/aggregator nodes */\n struct ice_sched_vsi_info {\n \tstruct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];\n \tstruct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];\n@@ -364,6 +421,8 @@ struct ice_port_info {\n \tstruct mutex sched_lock;\t/* protect access to TXSched tree */\n \tstruct ice_sched_node *\n \t\tsib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];\n+\t/* List contain profile ID(s) and other params per layer */\n+\tstruct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];\n \tstruct ice_dcbx_cfg local_dcbx_cfg;\t/* Oper/Local Cfg */\n \t/* DCBX info */\n \tstruct ice_dcbx_cfg remote_dcbx_cfg;\t/* Peer Cfg */\n@@ -415,6 +474,8 @@ struct ice_hw {\n \n \tu8 pf_id;\t\t/* device profile info */\n \n+\tu16 max_burst_size;\t/* driver sets this value */\n+\n \t/* Tx Scheduler values */\n \tu16 num_tx_sched_layers;\n \tu16 num_tx_sched_phys_layers;\n",
    "prefixes": [
        "S32",
        "v3",
        "02/15"
    ]
}
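The burst-size register described in the ice_sched.h hunk above is a 12-bit field whose MSB selects the granularity (64-byte chunks when clear, 1 KB chunks when set) and whose low bits count chunks of that size. The standalone sketch below only mirrors the arithmetic of ice_cfg_rl_burst_size() and ice_round_to_num(); encode_burst_size() and round_to_num() are hypothetical user-space names, not driver functions.

#include <stdint.h>
#include <stdio.h>

/* Constants mirrored from the ice_sched.h hunk above. */
#define ICE_64_BYTE_GRANULARITY			0u
#define ICE_KBYTE_GRANULARITY			(1u << 11)
#define ICE_MIN_BURST_SIZE_ALLOWED		64u	/* bytes */
#define ICE_MAX_BURST_SIZE_ALLOWED		(((1u << 11) - 1) * 1024u)
#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY	(((1u << 11) - 1) * 64u)
#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY	ICE_MAX_BURST_SIZE_ALLOWED

/* Round n to the nearest multiple of r, as ice_round_to_num() does. */
static uint32_t round_to_num(uint32_t n, uint32_t r)
{
	return (n % r < r / 2) ? (n / r) * r : ((n + r - 1) / r) * r;
}

/*
 * Hypothetical helper mirroring ice_cfg_rl_burst_size(): encode a burst
 * size in bytes as the 12-bit register value (MSB = granularity bit,
 * low 11 bits = chunk count). Returns 0xFFFF for out-of-range input.
 */
static uint16_t encode_burst_size(uint32_t bytes)
{
	if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
	    bytes > ICE_MAX_BURST_SIZE_ALLOWED)
		return 0xFFFF;

	if (round_to_num(bytes, 64) <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
		/* 64-byte granularity: MSB clear, value counts 64-byte chunks. */
		bytes = round_to_num(bytes, 64);
		return (uint16_t)(ICE_64_BYTE_GRANULARITY | (bytes / 64));
	}

	/* 1 KB granularity: MSB set, value counts 1024-byte chunks. */
	bytes = round_to_num(bytes, 1024);
	if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
		bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
	return (uint16_t)(ICE_KBYTE_GRANULARITY | (bytes / 1024));
}

int main(void)
{
	uint32_t sizes[] = { 64, 15 * 1024, 200 * 1024, 2047 * 1024 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u bytes -> 0x%03x\n", (unsigned int)sizes[i],
		       (unsigned int)encode_burst_size(sizes[i]));
	return 0;
}

The default ICE_SCHED_DFLT_BURST_SIZE of 15 KB stays within the 64-byte-granularity range (up to 2047 * 64 bytes, roughly 128 KB); larger requests are rounded to 1 KB chunks instead.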
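ice_sched_save_q_bw() records each applied limit in q_ctx->bw_t_info and sets the matching ICE_BW_TYPE_* bit, and ice_sched_replay_q_bw() later re-applies only the settings whose bit is set. A minimal user-space sketch of that record-and-replay pattern, assuming a simplified bw_info structure and printf() calls standing in for the real ice_sched_set_node_bw_lmt() invocations:

#include <stdint.h>
#include <stdio.h>

/* One bit per saved setting, mirroring enum ice_bw_type. */
enum bw_type { BW_PRIO, BW_CIR, BW_CIR_WT, BW_EIR, BW_EIR_WT, BW_SHARED, BW_CNT };

/* Simplified stand-in for struct ice_bw_type_info. */
struct bw_info {
	uint32_t bitmap;	/* which of the fields below are valid */
	uint32_t cir_kbps;	/* committed (min) rate */
	uint32_t eir_kbps;	/* excess (max) rate */
	uint32_t shared_kbps;	/* shared rate */
};

/* Record a setting: remember the value and mark its type bit. */
static void save_bw(struct bw_info *bi, enum bw_type t, uint32_t kbps)
{
	switch (t) {
	case BW_CIR:	bi->cir_kbps = kbps;	break;
	case BW_EIR:	bi->eir_kbps = kbps;	break;
	case BW_SHARED:	bi->shared_kbps = kbps;	break;
	default:	return;			/* other types omitted in this sketch */
	}
	bi->bitmap |= 1u << t;
}

/* Replay after a reset: re-apply only the settings that were saved. */
static void replay_bw(const struct bw_info *bi)
{
	if (bi->bitmap & (1u << BW_CIR))
		printf("re-apply min (CIR) limit: %u Kbps\n", (unsigned int)bi->cir_kbps);
	if (bi->bitmap & (1u << BW_EIR))
		printf("re-apply max (EIR) limit: %u Kbps\n", (unsigned int)bi->eir_kbps);
	if (bi->bitmap & (1u << BW_SHARED))
		printf("re-apply shared limit: %u Kbps\n", (unsigned int)bi->shared_kbps);
}

int main(void)
{
	struct bw_info q = { 0 };

	save_bw(&q, BW_EIR, 500000);	/* e.g. a 500 Mbps per-queue maximum */
	replay_bw(&q);			/* only the saved EIR limit is replayed */
	return 0;
}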
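ice_sched_validate_srl_node() accepts a shared rate limit only at the node's own layer, at the layer below it when the node has a single child, or at the layer above it when the node's parent has a single child. The pared-down sketch below checks just that condition; struct node and srl_layer_ok() are hypothetical stand-ins holding only the fields the test reads.

#include <stdio.h>

/* Minimal stand-in for struct ice_sched_node. */
struct node {
	struct node *parent;
	unsigned int num_children;
	unsigned int tx_sched_layer;	/* 0 at the root, growing toward leaves */
};

/* Returns 1 if a shared rate limit placed at sel_layer can serve this node. */
static int srl_layer_ok(const struct node *n, unsigned int sel_layer)
{
	if (sel_layer == n->tx_sched_layer)
		return 1;			/* node's own layer */
	if (sel_layer == n->tx_sched_layer + 1 && n->num_children == 1)
		return 1;			/* one layer below, single child */
	if (sel_layer + 1 == n->tx_sched_layer &&
	    n->parent && n->parent->num_children == 1)
		return 1;			/* one layer above, parent has a single child */
	return 0;
}

int main(void)
{
	struct node parent = { .parent = NULL, .num_children = 1, .tx_sched_layer = 4 };
	struct node leaf = { .parent = &parent, .num_children = 0, .tx_sched_layer = 5 };

	printf("layer 5 -> %d\n", srl_layer_ok(&leaf, 5));	/* own layer: allowed */
	printf("layer 4 -> %d\n", srl_layer_ok(&leaf, 4));	/* parent has one child: allowed */
	printf("layer 6 -> %d\n", srl_layer_ok(&leaf, 6));	/* leaf has no children: rejected */
	return 0;
}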