get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces the writable fields).

GET /api/patches/951912/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 951912,
    "url": "http://patchwork.ozlabs.org/api/patches/951912/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180801040433.5865-2-anirudh.venkataramanan@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180801040433.5865-2-anirudh.venkataramanan@intel.com>",
    "list_archive_url": null,
    "date": "2018-08-01T04:04:21",
    "name": "[v2,01/13] ice: Minor updates to Tx scheduler code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "bd23cbd61474dfffc6fc263152d66210914844fc",
    "submitter": {
        "id": 73601,
        "url": "http://patchwork.ozlabs.org/api/people/73601/?format=api",
        "name": "Anirudh Venkataramanan",
        "email": "anirudh.venkataramanan@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180801040433.5865-2-anirudh.venkataramanan@intel.com/mbox/",
    "series": [
        {
            "id": 58674,
            "url": "http://patchwork.ozlabs.org/api/series/58674/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=58674",
            "date": "2018-08-01T04:04:20",
            "name": "Feature updates for ice",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/58674/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/951912/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/951912/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 41gKSw47xWz9s1x\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed,  1 Aug 2018 14:04:48 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 15DE287F22;\n\tWed,  1 Aug 2018 04:04:47 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id l3q5SWXVr8jg; Wed,  1 Aug 2018 04:04:44 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id D6DF387F21;\n\tWed,  1 Aug 2018 04:04:44 +0000 (UTC)",
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ash.osuosl.org (Postfix) with ESMTP id 729AA1C0BBC\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  1 Aug 2018 04:04:43 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 7048425C00\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  1 Aug 2018 04:04:43 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id ebulyJmaWQTR for <intel-wired-lan@lists.osuosl.org>;\n\tWed,  1 Aug 2018 04:04:40 +0000 (UTC)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n\tby silver.osuosl.org (Postfix) with ESMTPS id C0BE525BF8\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  1 Aug 2018 04:04:40 +0000 (UTC)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t31 Jul 2018 21:04:39 -0700",
            "from lnahar-mobl.amr.corp.intel.com (HELO\n\tavenkata-mobl4.localdomain) ([10.252.134.1])\n\tby FMSMGA003.fm.intel.com with ESMTP; 31 Jul 2018 21:04:39 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,430,1526367600\"; d=\"scan'208\";a=\"69105085\"",
        "From": "Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Tue, 31 Jul 2018 21:04:21 -0700",
        "Message-Id": "<20180801040433.5865-2-anirudh.venkataramanan@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20180801040433.5865-1-anirudh.venkataramanan@intel.com>",
        "References": "<20180801040433.5865-1-anirudh.venkataramanan@intel.com>",
        "Subject": "[Intel-wired-lan] [PATCH v2 01/13] ice: Minor updates to Tx\n\tscheduler code",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "1) The maximum device nodes is a global value and shared by the whole\n   device. Add element AQ command would fail if there is no space to\n   add new nodes so the check for max nodes isn't required. So remove\n   ice_sched_get_num_nodes_per_layer and ice_sched_val_max_nodes.\n\n2) In ice_sched_add_elems, set default node's CIR/EIR bandwidth weight.\n\n3) Fix default scheduler topology buffer size as the firmware expects\n   a 4KB buffer at all times, and will error out if one of any other\n   size is provided.\n\n4) Fix some newline/whitespace issues for consistency.\n\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice_sched.c | 117 +++++----------------\n drivers/net/ethernet/intel/ice/ice_type.h  |   1 +\n 2 files changed, 25 insertions(+), 93 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c\nindex eeae199469b6..9aca28d4ef55 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.c\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.c\n@@ -192,14 +192,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,\n \tbuf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);\n \tif (!buf)\n \t\treturn ICE_ERR_NO_MEMORY;\n+\n \tbuf->hdr.parent_teid = parent->info.node_teid;\n \tbuf->hdr.num_elems = cpu_to_le16(num_nodes);\n \tfor (i = 0; i < num_nodes; i++)\n \t\tbuf->teid[i] = cpu_to_le32(node_teids[i]);\n+\n \tstatus = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,\n \t\t\t\t\t   &num_groups_removed, NULL);\n \tif (status || num_groups_removed != 1)\n \t\tice_debug(hw, ICE_DBG_SCHED, \"remove elements failed\\n\");\n+\n \tdevm_kfree(ice_hw_to_dev(hw), buf);\n \treturn status;\n }\n@@ -592,13 +595,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi)\n  */\n void ice_sched_cleanup_all(struct ice_hw *hw)\n {\n-\tif (!hw || !hw->port_info)\n+\tif (!hw)\n \t\treturn;\n \n-\tif (hw->layer_info)\n+\tif (hw->layer_info) {\n \t\tdevm_kfree(ice_hw_to_dev(hw), hw->layer_info);\n+\t\thw->layer_info = NULL;\n+\t}\n \n-\tice_sched_clear_port(hw->port_info);\n+\tif (hw->port_info)\n+\t\tice_sched_clear_port(hw->port_info);\n \n \thw->num_tx_sched_layers = 0;\n \thw->num_tx_sched_phys_layers = 0;\n@@ -671,9 +677,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,\n \t\t\tICE_AQC_ELEM_VALID_EIR;\n \t\tbuf->generic[i].data.generic = 0;\n \t\tbuf->generic[i].data.cir_bw.bw_profile_idx =\n-\t\t\tICE_SCHED_DFLT_RL_PROF_ID;\n+\t\t\tcpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);\n+\t\tbuf->generic[i].data.cir_bw.bw_alloc =\n+\t\t\tcpu_to_le16(ICE_SCHED_DFLT_BW_WT);\n+\t\tbuf->generic[i].data.eir_bw.bw_profile_idx =\n-\t\t\tICE_SCHED_DFLT_RL_PROF_ID;\n+\t\t\tcpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);\n+\t\tbuf->generic[i].data.eir_bw.bw_alloc =\n+\t\t\tcpu_to_le16(ICE_SCHED_DFLT_BW_WT);\n \t}\n \n \tstatus = ice_aq_add_sched_elems(hw, 1, buf, buf_size,\n@@ -697,7 +707,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,\n \n \t\tteid = le32_to_cpu(buf->generic[i].node_teid);\n \t\tnew_node = ice_sched_find_node_by_teid(parent, teid);\n-\n \t\tif (!new_node) {\n \t\t\tice_debug(hw, ICE_DBG_SCHED,\n \t\t\t\t  \"Node is missing for teid =%d\\n\", teid);\n@@ -710,7 +719,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,\n \t\t/* add it to previous node sibling pointer */\n \t\t/* Note: siblings are not linked across branches */\n \t\tprev = ice_sched_get_first_node(hw, tc_node, layer);\n-\n \t\tif (prev && prev != new_node) {\n \t\t\twhile (prev->sibling)\n \t\t\t\tprev = prev->sibling;\n@@ -850,78 +858,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)\n \treturn hw->sw_entry_point_layer;\n }\n \n-/**\n- * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer\n- * @pi: pointer to the port info struct\n- * @layer: layer number\n- *\n- * This function calculates the number of nodes present in the scheduler tree\n- * including all the branches for a given layer\n- */\n-static u16\n-ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)\n-{\n-\tstruct ice_hw *hw;\n-\tu16 num_nodes = 0;\n-\tu8 i;\n-\n-\tif (!pi)\n-\t\treturn num_nodes;\n-\n-\thw = pi->hw;\n-\n-\t/* Calculate the number of nodes for all TCs */\n-\tfor (i = 0; i < pi->root->num_children; i++) {\n-\t\tstruct ice_sched_node *tc_node, *node;\n-\n-\t\ttc_node = pi->root->children[i];\n-\n-\t\t/* Get the first node */\n-\t\tnode = ice_sched_get_first_node(hw, tc_node, layer);\n-\t\tif (!node)\n-\t\t\tcontinue;\n-\n-\t\t/* count the siblings */\n-\t\twhile (node) {\n-\t\t\tnum_nodes++;\n-\t\t\tnode = node->sibling;\n-\t\t}\n-\t}\n-\n-\treturn num_nodes;\n-}\n-\n-/**\n- * ice_sched_val_max_nodes - check max number of nodes reached or not\n- * @pi: port information structure\n- * @new_num_nodes_per_layer: pointer to the new number of nodes array\n- *\n- * This function checks whether the scheduler tree layers have enough space to\n- * add new nodes\n- */\n-static enum ice_status\n-ice_sched_validate_for_max_nodes(struct ice_port_info *pi,\n-\t\t\t\t u16 *new_num_nodes_per_layer)\n-{\n-\tstruct ice_hw *hw = pi->hw;\n-\tu8 i, qg_layer;\n-\tu16 num_nodes;\n-\n-\tqg_layer = ice_sched_get_qgrp_layer(hw);\n-\n-\t/* walk through all the layers from SW entry point to qgroup layer */\n-\tfor (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {\n-\t\tnum_nodes = ice_sched_get_num_nodes_per_layer(pi, i);\n-\t\tif (num_nodes + new_num_nodes_per_layer[i] >\n-\t\t    le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {\n-\t\t\tice_debug(hw, ICE_DBG_SCHED,\n-\t\t\t\t  \"max nodes reached for layer = %d\\n\", i);\n-\t\t\treturn ICE_ERR_CFG;\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n /**\n  * ice_rm_dflt_leaf_node - remove the default leaf node in the tree\n  * @pi: port information structure\n@@ -1003,14 +939,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)\n \thw = pi->hw;\n \n \t/* Query the Default Topology from FW */\n-\tbuf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,\n-\t\t\t   sizeof(*buf), GFP_KERNEL);\n+\tbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);\n \tif (!buf)\n \t\treturn ICE_ERR_NO_MEMORY;\n \n \t/* Query default scheduling tree topology */\n-\tstatus = ice_aq_get_dflt_topo(hw, pi->lport, buf,\n-\t\t\t\t      sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,\n+\tstatus = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,\n \t\t\t\t      &num_branches, NULL);\n \tif (status)\n \t\tgoto err_init_port;\n@@ -1311,16 +1245,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,\n \tu16 num_added = 0;\n \tu8 i, qgl, vsil;\n \n-\tstatus = ice_sched_validate_for_max_nodes(pi, num_nodes);\n-\tif (status)\n-\t\treturn status;\n-\n \tqgl = ice_sched_get_qgrp_layer(hw);\n \tvsil = ice_sched_get_vsi_layer(hw);\n \tparent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);\n \tfor (i = vsil + 1; i <= qgl; i++) {\n \t\tif (!parent)\n \t\t\treturn ICE_ERR_CFG;\n+\n \t\tstatus = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,\n \t\t\t\t\t\t      num_nodes[i],\n \t\t\t\t\t\t      &first_node_teid,\n@@ -1399,7 +1330,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,\n {\n \tstruct ice_sched_node *node;\n \tu16 max_child;\n-\tu8 i, vsil;\n+\tu8 vsil;\n+\tint i;\n \n \tvsil = ice_sched_get_vsi_layer(hw);\n \tfor (i = vsil; i >= hw->sw_entry_point_layer; i--)\n@@ -1412,7 +1344,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,\n \t\t\t/* If intermediate nodes are reached max children\n \t\t\t * then add a new one.\n \t\t\t */\n-\t\t\tnode = ice_sched_get_first_node(hw, tc_node, i);\n+\t\t\tnode = ice_sched_get_first_node(hw, tc_node, (u8)i);\n \t\t\tmax_child = le16_to_cpu(hw->layer_info[i].max_children);\n \n \t\t\t/* scan all the siblings */\n@@ -1451,10 +1383,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,\n \tif (!pi)\n \t\treturn ICE_ERR_PARAM;\n \n-\tstatus = ice_sched_validate_for_max_nodes(pi, num_nodes);\n-\tif (status)\n-\t\treturn status;\n-\n \tvsil = ice_sched_get_vsi_layer(pi->hw);\n \tfor (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {\n \t\tstatus = ice_sched_add_nodes_to_layer(pi, tc_node, parent,\n@@ -1479,6 +1407,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,\n \t\tif (i == vsil)\n \t\t\tparent->vsi_id = vsi_id;\n \t}\n+\n \treturn 0;\n }\n \n@@ -1633,9 +1562,11 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,\n \t\tstatus = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);\n \t\tif (status)\n \t\t\treturn status;\n+\n \t\tvsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);\n \t\tif (!vsi_node)\n \t\t\treturn ICE_ERR_CFG;\n+\n \t\tvsi->vsi_node[tc] = vsi_node;\n \t\tvsi_node->in_use = true;\n \t}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 97c366e0ca59..f5aff99bbb61 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -204,6 +204,7 @@ enum ice_agg_type {\n };\n \n #define ICE_SCHED_DFLT_RL_PROF_ID\t0\n+#define ICE_SCHED_DFLT_BW_WT\t\t1\n \n /* vsi type list entry to locate corresponding vsi/ag nodes */\n struct ice_sched_vsi_info {\n",
    "prefixes": [
        "v2",
        "01/13"
    ]
}