get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch.

GET /api/patches/972031/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 972031,
    "url": "http://patchwork.ozlabs.org/api/patches/972031/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180920002319.10971-3-anirudh.venkataramanan@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180920002319.10971-3-anirudh.venkataramanan@intel.com>",
    "list_archive_url": null,
    "date": "2018-09-20T00:23:05",
    "name": "[02/16] ice: Move common functions out of ice_main.c part 2/7",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "5261537eb91bf3dac2567cf704c3b2489e4fdf71",
    "submitter": {
        "id": 73601,
        "url": "http://patchwork.ozlabs.org/api/people/73601/?format=api",
        "name": "Anirudh Venkataramanan",
        "email": "anirudh.venkataramanan@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180920002319.10971-3-anirudh.venkataramanan@intel.com/mbox/",
    "series": [
        {
            "id": 66525,
            "url": "http://patchwork.ozlabs.org/api/series/66525/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=66525",
            "date": "2018-09-20T00:23:03",
            "name": "Implementation updates for ice",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/66525/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/972031/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/972031/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 42FyBb3Nplz9sBJ\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 20 Sep 2018 10:23:35 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id E073A876DA;\n\tThu, 20 Sep 2018 00:23:33 +0000 (UTC)",
            "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id KvCQEFHOr98l; Thu, 20 Sep 2018 00:23:30 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id C9106876A9;\n\tThu, 20 Sep 2018 00:23:30 +0000 (UTC)",
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ash.osuosl.org (Postfix) with ESMTP id 0BF381C08AF\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 20 Sep 2018 00:23:29 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 081AC227F5\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 20 Sep 2018 00:23:29 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id LU2H++wi6GiO for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 20 Sep 2018 00:23:24 +0000 (UTC)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby silver.osuosl.org (Postfix) with ESMTPS id 46B50302BD\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 20 Sep 2018 00:23:24 +0000 (UTC)",
            "from fmsmga006.fm.intel.com ([10.253.24.20])\n\tby fmsmga105.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t19 Sep 2018 17:23:23 -0700",
            "from shasta.jf.intel.com ([10.166.241.11])\n\tby fmsmga006.fm.intel.com with ESMTP; 19 Sep 2018 17:23:19 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.53,396,1531810800\"; d=\"scan'208\";a=\"265057691\"",
        "From": "Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Wed, 19 Sep 2018 17:23:05 -0700",
        "Message-Id": "<20180920002319.10971-3-anirudh.venkataramanan@intel.com>",
        "X-Mailer": "git-send-email 2.14.3",
        "In-Reply-To": "<20180920002319.10971-1-anirudh.venkataramanan@intel.com>",
        "References": "<20180920002319.10971-1-anirudh.venkataramanan@intel.com>",
        "Subject": "[Intel-wired-lan] [PATCH 02/16] ice: Move common functions out of\n\tice_main.c part 2/7",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "This patch continues the code move out of ice_main.c\n\nThe following top level functions (and related dependency functions) were\nmoved to ice_lib.c:\nice_vsi_start_rx_rings\nice_vsi_stop_rx_rings\nice_vsi_stop_tx_rings\nice_vsi_cfg_rxqs\nice_vsi_cfg_txqs\nice_vsi_cfg_msix\n\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice_lib.c  | 491 +++++++++++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_lib.h  |  13 +\n drivers/net/ethernet/intel/ice/ice_main.c | 541 ++----------------------------\n 3 files changed, 526 insertions(+), 519 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c\nindex d938dad7bb5f..8fb7821f021e 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.c\n@@ -4,6 +4,227 @@\n #include \"ice.h\"\n #include \"ice_lib.h\"\n \n+/**\n+ * ice_setup_rx_ctx - Configure a receive ring context\n+ * @ring: The Rx ring to configure\n+ *\n+ * Configure the Rx descriptor ring in RLAN context.\n+ */\n+static int ice_setup_rx_ctx(struct ice_ring *ring)\n+{\n+\tstruct ice_vsi *vsi = ring->vsi;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\tu32 rxdid = ICE_RXDID_FLEX_NIC;\n+\tstruct ice_rlan_ctx rlan_ctx;\n+\tu32 regval;\n+\tu16 pf_q;\n+\tint err;\n+\n+\t/* what is RX queue number in global space of 2K rx queues */\n+\tpf_q = vsi->rxq_map[ring->q_index];\n+\n+\t/* clear the context structure first */\n+\tmemset(&rlan_ctx, 0, sizeof(rlan_ctx));\n+\n+\trlan_ctx.base = ring->dma >> 7;\n+\n+\trlan_ctx.qlen = ring->count;\n+\n+\t/* Receive Packet Data Buffer Size.\n+\t * The Packet Data Buffer Size is defined in 128 byte units.\n+\t */\n+\trlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n+\n+\t/* use 32 byte descriptors */\n+\trlan_ctx.dsize = 1;\n+\n+\t/* Strip the Ethernet CRC bytes before the packet is posted to host\n+\t * memory.\n+\t */\n+\trlan_ctx.crcstrip = 1;\n+\n+\t/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */\n+\trlan_ctx.l2tsel = 1;\n+\n+\trlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;\n+\trlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;\n+\trlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;\n+\n+\t/* This controls whether VLAN is stripped from inner headers\n+\t * The VLAN in the inner L2 header is stripped to the receive\n+\t * descriptor if enabled by this flag.\n+\t */\n+\trlan_ctx.showiv = 0;\n+\n+\t/* Max packet size for this queue - must not be set to a larger value\n+\t * than 5 x DBUF\n+\t */\n+\trlan_ctx.rxmax = min_t(u16, vsi->max_frame,\n+\t\t\t 
      ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);\n+\n+\t/* Rx queue threshold in units of 64 */\n+\trlan_ctx.lrxqthresh = 1;\n+\n+\t /* Enable Flexible Descriptors in the queue context which\n+\t  * allows this driver to select a specific receive descriptor format\n+\t  */\n+\tregval = rd32(hw, QRXFLXP_CNTXT(pf_q));\n+\tregval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &\n+\t\tQRXFLXP_CNTXT_RXDID_IDX_M;\n+\n+\t/* increasing context priority to pick up profile id;\n+\t * default is 0x01; setting to 0x03 to ensure profile\n+\t * is programming if prev context is of same priority\n+\t */\n+\tregval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n+\t\tQRXFLXP_CNTXT_RXDID_PRIO_M;\n+\n+\twr32(hw, QRXFLXP_CNTXT(pf_q), regval);\n+\n+\t/* Absolute queue number out of 2K needs to be passed */\n+\terr = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);\n+\tif (err) {\n+\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\\n\",\n+\t\t\tpf_q, err);\n+\t\treturn -EIO;\n+\t}\n+\n+\t/* init queue specific tail register */\n+\tring->tail = hw->hw_addr + QRX_TAIL(pf_q);\n+\twritel(0, ring->tail);\n+\tice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance\n+ * @ring: The Tx ring to configure\n+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized\n+ * @pf_q: queue index in the PF space\n+ *\n+ * Configure the Tx descriptor ring in TLAN context.\n+ */\n+static void\n+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)\n+{\n+\tstruct ice_vsi *vsi = ring->vsi;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\n+\ttlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;\n+\n+\ttlan_ctx->port_num = vsi->port_info->lport;\n+\n+\t/* Transmit Queue Length */\n+\ttlan_ctx->qlen = ring->count;\n+\n+\t/* PF number */\n+\ttlan_ctx->pf_num = hw->pf_id;\n+\n+\t/* queue belongs to a specific VSI type\n+\t * VF / VM index should be 
programmed per vmvf_type setting:\n+\t * for vmvf_type = VF, it is VF number between 0-256\n+\t * for vmvf_type = VM, it is VM number between 0-767\n+\t * for PF or EMP this field should be set to zero\n+\t */\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn;\n+\t}\n+\n+\t/* make sure the context is associated with the right VSI */\n+\ttlan_ctx->src_vsi = vsi->vsi_num;\n+\n+\ttlan_ctx->tso_ena = ICE_TX_LEGACY;\n+\ttlan_ctx->tso_qnum = pf_q;\n+\n+\t/* Legacy or Advanced Host Interface:\n+\t * 0: Advanced Host Interface\n+\t * 1: Legacy Host Interface\n+\t */\n+\ttlan_ctx->legacy_int = ICE_TX_LEGACY;\n+}\n+\n+/**\n+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled\n+ * @pf: the PF being configured\n+ * @pf_q: the PF queue\n+ * @ena: enable or disable state of the queue\n+ *\n+ * This routine will wait for the given Rx queue of the PF to reach the\n+ * enabled or disabled state.\n+ * Returns -ETIMEDOUT in case of failing to reach the requested state after\n+ * multiple retries; else will return 0 in case of success.\n+ */\n+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {\n+\t\tu32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));\n+\n+\t\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\tbreak;\n+\n+\t\tusleep_range(10, 20);\n+\t}\n+\tif (i >= ICE_Q_WAIT_RETRY_LIMIT)\n+\t\treturn -ETIMEDOUT;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings\n+ * @vsi: the VSI being configured\n+ * @ena: start or stop the rx rings\n+ */\n+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint i, j, ret = 0;\n+\n+\tfor (i = 0; i < vsi->num_rxq; i++) {\n+\t\tint pf_q = vsi->rxq_map[i];\n+\t\tu32 rx_reg;\n+\n+\t\tfor (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {\n+\t\t\trx_reg 
= rd32(hw, QRX_CTRL(pf_q));\n+\t\t\tif (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==\n+\t\t\t    ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))\n+\t\t\t\tbreak;\n+\t\t\tusleep_range(1000, 2000);\n+\t\t}\n+\n+\t\t/* Skip if the queue is already in the requested state */\n+\t\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\tcontinue;\n+\n+\t\t/* turn on/off the queue */\n+\t\tif (ena)\n+\t\t\trx_reg |= QRX_CTRL_QENA_REQ_M;\n+\t\telse\n+\t\t\trx_reg &= ~QRX_CTRL_QENA_REQ_M;\n+\t\twr32(hw, QRX_CTRL(pf_q), rx_reg);\n+\n+\t\t/* wait for the change to finish */\n+\t\tret = ice_pf_rxq_wait(pf, pf_q, ena);\n+\t\tif (ret) {\n+\t\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\t\"VSI idx %d Rx ring %d %sable timeout\\n\",\n+\t\t\t\tvsi->idx, pf_q, (ena ? \"en\" : \"dis\"));\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n /**\n  * ice_add_mac_to_list - Add a mac address filter entry to the list\n  * @vsi: the VSI to be forwarded to\n@@ -185,6 +406,174 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)\n \treturn status;\n }\n \n+/**\n+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx\n+ * @vsi: the VSI being configured\n+ *\n+ * Return 0 on success and a negative value on error\n+ * Configure the Rx VSI for operation.\n+ */\n+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)\n+{\n+\tint err = 0;\n+\tu16 i;\n+\n+\tif (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)\n+\t\tvsi->max_frame = vsi->netdev->mtu +\n+\t\t\tETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;\n+\telse\n+\t\tvsi->max_frame = ICE_RXBUF_2048;\n+\n+\tvsi->rx_buf_len = ICE_RXBUF_2048;\n+\t/* set up individual rings */\n+\tfor (i = 0; i < vsi->num_rxq && !err; i++)\n+\t\terr = ice_setup_rx_ctx(vsi->rx_rings[i]);\n+\n+\tif (err) {\n+\t\tdev_err(&vsi->back->pdev->dev, \"ice_setup_rx_ctx failed\\n\");\n+\t\treturn -EIO;\n+\t}\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_cfg_txqs - Configure the VSI for Tx\n+ * @vsi: the VSI being configured\n+ *\n+ * Return 0 on success and a negative value on error\n+ * Configure the Tx VSI for operation.\n+ 
*/\n+int ice_vsi_cfg_txqs(struct ice_vsi *vsi)\n+{\n+\tstruct ice_aqc_add_tx_qgrp *qg_buf;\n+\tstruct ice_aqc_add_txqs_perq *txq;\n+\tstruct ice_pf *pf = vsi->back;\n+\tenum ice_status status;\n+\tu16 buf_len, i, pf_q;\n+\tint err = 0, tc = 0;\n+\tu8 num_q_grps;\n+\n+\tbuf_len = sizeof(struct ice_aqc_add_tx_qgrp);\n+\tqg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);\n+\tif (!qg_buf)\n+\t\treturn -ENOMEM;\n+\n+\tif (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {\n+\t\terr = -EINVAL;\n+\t\tgoto err_cfg_txqs;\n+\t}\n+\tqg_buf->num_txqs = 1;\n+\tnum_q_grps = 1;\n+\n+\t/* set up and configure the tx queues */\n+\tice_for_each_txq(vsi, i) {\n+\t\tstruct ice_tlan_ctx tlan_ctx = { 0 };\n+\n+\t\tpf_q = vsi->txq_map[i];\n+\t\tice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);\n+\t\t/* copy context contents into the qg_buf */\n+\t\tqg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);\n+\t\tice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,\n+\t\t\t    ice_tlan_ctx_info);\n+\n+\t\t/* init queue specific tail reg. 
It is referred as transmit\n+\t\t * comm scheduler queue doorbell.\n+\t\t */\n+\t\tvsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);\n+\t\tstatus = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,\n+\t\t\t\t\t num_q_grps, qg_buf, buf_len, NULL);\n+\t\tif (status) {\n+\t\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\t\"Failed to set LAN Tx queue context, error: %d\\n\",\n+\t\t\t\tstatus);\n+\t\t\terr = -ENODEV;\n+\t\t\tgoto err_cfg_txqs;\n+\t\t}\n+\n+\t\t/* Add Tx Queue TEID into the VSI tx ring from the response\n+\t\t * This will complete configuring and enabling the queue.\n+\t\t */\n+\t\ttxq = &qg_buf->txqs[0];\n+\t\tif (pf_q == le16_to_cpu(txq->txq_id))\n+\t\t\tvsi->tx_rings[i]->txq_teid =\n+\t\t\t\tle32_to_cpu(txq->q_teid);\n+\t}\n+err_cfg_txqs:\n+\tdevm_kfree(&pf->pdev->dev, qg_buf);\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW\n+ * @vsi: the VSI being configured\n+ */\n+void ice_vsi_cfg_msix(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tu16 vector = vsi->base_vector;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 txq = 0, rxq = 0;\n+\tint i, q, itr;\n+\tu8 itr_gran;\n+\n+\tfor (i = 0; i < vsi->num_q_vectors; i++, vector++) {\n+\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[i];\n+\n+\t\titr_gran = hw->itr_gran_200;\n+\n+\t\tif (q_vector->num_ring_rx) {\n+\t\t\tq_vector->rx.itr =\n+\t\t\t\tITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,\n+\t\t\t\t\t   itr_gran);\n+\t\t\tq_vector->rx.latency_range = ICE_LOW_LATENCY;\n+\t\t}\n+\n+\t\tif (q_vector->num_ring_tx) {\n+\t\t\tq_vector->tx.itr =\n+\t\t\t\tITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,\n+\t\t\t\t\t   itr_gran);\n+\t\t\tq_vector->tx.latency_range = ICE_LOW_LATENCY;\n+\t\t}\n+\t\twr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);\n+\t\twr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);\n+\n+\t\t/* Both Transmit Queue Interrupt Cause Control register\n+\t\t * and Receive Queue Interrupt Cause control register\n+\t\t * 
expects MSIX_INDX field to be the vector index\n+\t\t * within the function space and not the absolute\n+\t\t * vector index across PF or across device.\n+\t\t * For SR-IOV VF VSIs queue vector index always starts\n+\t\t * with 1 since first vector index(0) is used for OICR\n+\t\t * in VF space. Since VMDq and other PF VSIs are withtin\n+\t\t * the PF function space, use the vector index thats\n+\t\t * tracked for this PF.\n+\t\t */\n+\t\tfor (q = 0; q < q_vector->num_ring_tx; q++) {\n+\t\t\tu32 val;\n+\n+\t\t\titr = ICE_ITR_NONE;\n+\t\t\tval = QINT_TQCTL_CAUSE_ENA_M |\n+\t\t\t      (itr << QINT_TQCTL_ITR_INDX_S)  |\n+\t\t\t      (vector << QINT_TQCTL_MSIX_INDX_S);\n+\t\t\twr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);\n+\t\t\ttxq++;\n+\t\t}\n+\n+\t\tfor (q = 0; q < q_vector->num_ring_rx; q++) {\n+\t\t\tu32 val;\n+\n+\t\t\titr = ICE_ITR_NONE;\n+\t\t\tval = QINT_RQCTL_CAUSE_ENA_M |\n+\t\t\t      (itr << QINT_RQCTL_ITR_INDX_S)  |\n+\t\t\t      (vector << QINT_RQCTL_MSIX_INDX_S);\n+\t\t\twr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);\n+\t\t\trxq++;\n+\t\t}\n+\t}\n+\n+\tice_flush(hw);\n+}\n+\n /**\n  * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx\n  * @vsi: the vsi being changed\n@@ -256,3 +645,105 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)\n \tvsi->info.vlan_flags = ctxt.info.vlan_flags;\n \treturn 0;\n }\n+\n+/**\n+ * ice_vsi_start_rx_rings - start VSI's rx rings\n+ * @vsi: the VSI whose rings are to be started\n+ *\n+ * Returns 0 on success and a negative value on error\n+ */\n+int ice_vsi_start_rx_rings(struct ice_vsi *vsi)\n+{\n+\treturn ice_vsi_ctrl_rx_rings(vsi, true);\n+}\n+\n+/**\n+ * ice_vsi_stop_rx_rings - stop VSI's rx rings\n+ * @vsi: the VSI\n+ *\n+ * Returns 0 on success and a negative value on error\n+ */\n+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)\n+{\n+\treturn ice_vsi_ctrl_rx_rings(vsi, false);\n+}\n+\n+/**\n+ * ice_vsi_stop_tx_rings - Disable Tx rings\n+ * @vsi: the VSI being configured\n+ 
*/\n+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tenum ice_status status;\n+\tu32 *q_teids, val;\n+\tu16 *q_ids, i;\n+\tint err = 0;\n+\n+\tif (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)\n+\t\treturn -EINVAL;\n+\n+\tq_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),\n+\t\t\t       GFP_KERNEL);\n+\tif (!q_teids)\n+\t\treturn -ENOMEM;\n+\n+\tq_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),\n+\t\t\t     GFP_KERNEL);\n+\tif (!q_ids) {\n+\t\terr = -ENOMEM;\n+\t\tgoto err_alloc_q_ids;\n+\t}\n+\n+\t/* set up the tx queue list to be disabled */\n+\tice_for_each_txq(vsi, i) {\n+\t\tu16 v_idx;\n+\n+\t\tif (!vsi->tx_rings || !vsi->tx_rings[i]) {\n+\t\t\terr = -EINVAL;\n+\t\t\tgoto err_out;\n+\t\t}\n+\n+\t\tq_ids[i] = vsi->txq_map[i];\n+\t\tq_teids[i] = vsi->tx_rings[i]->txq_teid;\n+\n+\t\t/* clear cause_ena bit for disabled queues */\n+\t\tval = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));\n+\t\tval &= ~QINT_TQCTL_CAUSE_ENA_M;\n+\t\twr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);\n+\n+\t\t/* software is expected to wait for 100 ns */\n+\t\tndelay(100);\n+\n+\t\t/* trigger a software interrupt for the vector associated to\n+\t\t * the queue to schedule napi handler\n+\t\t */\n+\t\tv_idx = vsi->tx_rings[i]->q_vector->v_idx;\n+\t\twr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),\n+\t\t     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);\n+\t}\n+\tstatus = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,\n+\t\t\t\t NULL);\n+\t/* if the disable queue command was exercised during an active reset\n+\t * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as\n+\t * the reset operation disables queues at the hardware level anyway.\n+\t */\n+\tif (status == ICE_ERR_RESET_ONGOING) {\n+\t\tdev_info(&pf->pdev->dev,\n+\t\t\t \"Reset in progress. 
LAN Tx queues already disabled\\n\");\n+\t} else if (status) {\n+\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\"Failed to disable LAN Tx queues, error: %d\\n\",\n+\t\t\tstatus);\n+\t\terr = -ENODEV;\n+\t}\n+\n+err_out:\n+\tdevm_kfree(&pf->pdev->dev, q_ids);\n+\n+err_alloc_q_ids:\n+\tdevm_kfree(&pf->pdev->dev, q_teids);\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h\nindex c10874d26eee..ad4257929b9b 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.h\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.h\n@@ -13,6 +13,12 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h);\n \n void ice_update_eth_stats(struct ice_vsi *vsi);\n \n+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);\n+\n+int ice_vsi_cfg_txqs(struct ice_vsi *vsi);\n+\n+void ice_vsi_cfg_msix(struct ice_vsi *vsi);\n+\n int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);\n \n int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);\n@@ -20,4 +26,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);\n int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);\n \n int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);\n+\n+int ice_vsi_start_rx_rings(struct ice_vsi *vsi);\n+\n+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);\n+\n+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi);\n+\n #endif /* !_ICE_LIB_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex a7c26b0f8f49..454479bf50e4 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -1844,77 +1844,6 @@ static void ice_vsi_free_irq(struct ice_vsi *vsi)\n \t}\n }\n \n-/**\n- * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW\n- * @vsi: the VSI being configured\n- */\n-static void ice_vsi_cfg_msix(struct ice_vsi *vsi)\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tu16 vector = vsi->base_vector;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tu32 txq = 0, rxq = 0;\n-\tint i, q, 
itr;\n-\tu8 itr_gran;\n-\n-\tfor (i = 0; i < vsi->num_q_vectors; i++, vector++) {\n-\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[i];\n-\n-\t\titr_gran = hw->itr_gran_200;\n-\n-\t\tif (q_vector->num_ring_rx) {\n-\t\t\tq_vector->rx.itr =\n-\t\t\t\tITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,\n-\t\t\t\t\t   itr_gran);\n-\t\t\tq_vector->rx.latency_range = ICE_LOW_LATENCY;\n-\t\t}\n-\n-\t\tif (q_vector->num_ring_tx) {\n-\t\t\tq_vector->tx.itr =\n-\t\t\t\tITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,\n-\t\t\t\t\t   itr_gran);\n-\t\t\tq_vector->tx.latency_range = ICE_LOW_LATENCY;\n-\t\t}\n-\t\twr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);\n-\t\twr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);\n-\n-\t\t/* Both Transmit Queue Interrupt Cause Control register\n-\t\t * and Receive Queue Interrupt Cause control register\n-\t\t * expects MSIX_INDX field to be the vector index\n-\t\t * within the function space and not the absolute\n-\t\t * vector index across PF or across device.\n-\t\t * For SR-IOV VF VSIs queue vector index always starts\n-\t\t * with 1 since first vector index(0) is used for OICR\n-\t\t * in VF space. 
Since VMDq and other PF VSIs are withtin\n-\t\t * the PF function space, use the vector index thats\n-\t\t * tracked for this PF.\n-\t\t */\n-\t\tfor (q = 0; q < q_vector->num_ring_tx; q++) {\n-\t\t\tu32 val;\n-\n-\t\t\titr = ICE_TX_ITR;\n-\t\t\tval = QINT_TQCTL_CAUSE_ENA_M |\n-\t\t\t      (itr << QINT_TQCTL_ITR_INDX_S)  |\n-\t\t\t      (vector << QINT_TQCTL_MSIX_INDX_S);\n-\t\t\twr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);\n-\t\t\ttxq++;\n-\t\t}\n-\n-\t\tfor (q = 0; q < q_vector->num_ring_rx; q++) {\n-\t\t\tu32 val;\n-\n-\t\t\titr = ICE_RX_ITR;\n-\t\t\tval = QINT_RQCTL_CAUSE_ENA_M |\n-\t\t\t      (itr << QINT_RQCTL_ITR_INDX_S)  |\n-\t\t\t      (vector << QINT_RQCTL_MSIX_INDX_S);\n-\t\t\twr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);\n-\t\t\trxq++;\n-\t\t}\n-\t}\n-\n-\tice_flush(hw);\n-}\n-\n /**\n  * ice_ena_misc_vector - enable the non-queue interrupts\n  * @pf: board private structure\n@@ -3966,248 +3895,6 @@ static int ice_restore_vlan(struct ice_vsi *vsi)\n \treturn err;\n }\n \n-/**\n- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance\n- * @ring: The Tx ring to configure\n- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized\n- * @pf_q: queue index in the PF space\n- *\n- * Configure the Tx descriptor ring in TLAN context.\n- */\n-static void\n-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)\n-{\n-\tstruct ice_vsi *vsi = ring->vsi;\n-\tstruct ice_hw *hw = &vsi->back->hw;\n-\n-\ttlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;\n-\n-\ttlan_ctx->port_num = vsi->port_info->lport;\n-\n-\t/* Transmit Queue Length */\n-\ttlan_ctx->qlen = ring->count;\n-\n-\t/* PF number */\n-\ttlan_ctx->pf_num = hw->pf_id;\n-\n-\t/* queue belongs to a specific VSI type\n-\t * VF / VM index should be programmed per vmvf_type setting:\n-\t * for vmvf_type = VF, it is VF number between 0-256\n-\t * for vmvf_type = VM, it is VM number between 0-767\n-\t * for PF or EMP this field should be set to zero\n-\t */\n-\tswitch 
(vsi->type) {\n-\tcase ICE_VSI_PF:\n-\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n-\t\tbreak;\n-\tdefault:\n-\t\treturn;\n-\t}\n-\n-\t/* make sure the context is associated with the right VSI */\n-\ttlan_ctx->src_vsi = vsi->vsi_num;\n-\n-\ttlan_ctx->tso_ena = ICE_TX_LEGACY;\n-\ttlan_ctx->tso_qnum = pf_q;\n-\n-\t/* Legacy or Advanced Host Interface:\n-\t * 0: Advanced Host Interface\n-\t * 1: Legacy Host Interface\n-\t */\n-\ttlan_ctx->legacy_int = ICE_TX_LEGACY;\n-}\n-\n-/**\n- * ice_vsi_cfg_txqs - Configure the VSI for Tx\n- * @vsi: the VSI being configured\n- *\n- * Return 0 on success and a negative value on error\n- * Configure the Tx VSI for operation.\n- */\n-static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)\n-{\n-\tstruct ice_aqc_add_tx_qgrp *qg_buf;\n-\tstruct ice_aqc_add_txqs_perq *txq;\n-\tstruct ice_pf *pf = vsi->back;\n-\tenum ice_status status;\n-\tu16 buf_len, i, pf_q;\n-\tint err = 0, tc = 0;\n-\tu8 num_q_grps;\n-\n-\tbuf_len = sizeof(struct ice_aqc_add_tx_qgrp);\n-\tqg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);\n-\tif (!qg_buf)\n-\t\treturn -ENOMEM;\n-\n-\tif (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {\n-\t\terr = -EINVAL;\n-\t\tgoto err_cfg_txqs;\n-\t}\n-\tqg_buf->num_txqs = 1;\n-\tnum_q_grps = 1;\n-\n-\t/* set up and configure the tx queues */\n-\tice_for_each_txq(vsi, i) {\n-\t\tstruct ice_tlan_ctx tlan_ctx = { 0 };\n-\n-\t\tpf_q = vsi->txq_map[i];\n-\t\tice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);\n-\t\t/* copy context contents into the qg_buf */\n-\t\tqg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);\n-\t\tice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,\n-\t\t\t    ice_tlan_ctx_info);\n-\n-\t\t/* init queue specific tail reg. 
It is referred as transmit\n-\t\t * comm scheduler queue doorbell.\n-\t\t */\n-\t\tvsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);\n-\t\tstatus = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,\n-\t\t\t\t\t num_q_grps, qg_buf, buf_len, NULL);\n-\t\tif (status) {\n-\t\t\tdev_err(&vsi->back->pdev->dev,\n-\t\t\t\t\"Failed to set LAN Tx queue context, error: %d\\n\",\n-\t\t\t\tstatus);\n-\t\t\terr = -ENODEV;\n-\t\t\tgoto err_cfg_txqs;\n-\t\t}\n-\n-\t\t/* Add Tx Queue TEID into the VSI tx ring from the response\n-\t\t * This will complete configuring and enabling the queue.\n-\t\t */\n-\t\ttxq = &qg_buf->txqs[0];\n-\t\tif (pf_q == le16_to_cpu(txq->txq_id))\n-\t\t\tvsi->tx_rings[i]->txq_teid =\n-\t\t\t\tle32_to_cpu(txq->q_teid);\n-\t}\n-err_cfg_txqs:\n-\tdevm_kfree(&pf->pdev->dev, qg_buf);\n-\treturn err;\n-}\n-\n-/**\n- * ice_setup_rx_ctx - Configure a receive ring context\n- * @ring: The Rx ring to configure\n- *\n- * Configure the Rx descriptor ring in RLAN context.\n- */\n-static int ice_setup_rx_ctx(struct ice_ring *ring)\n-{\n-\tstruct ice_vsi *vsi = ring->vsi;\n-\tstruct ice_hw *hw = &vsi->back->hw;\n-\tu32 rxdid = ICE_RXDID_FLEX_NIC;\n-\tstruct ice_rlan_ctx rlan_ctx;\n-\tu32 regval;\n-\tu16 pf_q;\n-\tint err;\n-\n-\t/* what is RX queue number in global space of 2K rx queues */\n-\tpf_q = vsi->rxq_map[ring->q_index];\n-\n-\t/* clear the context structure first */\n-\tmemset(&rlan_ctx, 0, sizeof(rlan_ctx));\n-\n-\trlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;\n-\n-\trlan_ctx.qlen = ring->count;\n-\n-\t/* Receive Packet Data Buffer Size.\n-\t * The Packet Data Buffer Size is defined in 128 byte units.\n-\t */\n-\trlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n-\n-\t/* use 32 byte descriptors */\n-\trlan_ctx.dsize = 1;\n-\n-\t/* Strip the Ethernet CRC bytes before the packet is posted to host\n-\t * memory.\n-\t */\n-\trlan_ctx.crcstrip = 1;\n-\n-\t/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */\n-\trlan_ctx.l2tsel 
= 1;\n-\n-\trlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;\n-\trlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;\n-\trlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;\n-\n-\t/* This controls whether VLAN is stripped from inner headers\n-\t * The VLAN in the inner L2 header is stripped to the receive\n-\t * descriptor if enabled by this flag.\n-\t */\n-\trlan_ctx.showiv = 0;\n-\n-\t/* Max packet size for this queue - must not be set to a larger value\n-\t * than 5 x DBUF\n-\t */\n-\trlan_ctx.rxmax = min_t(u16, vsi->max_frame,\n-\t\t\t       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);\n-\n-\t/* Rx queue threshold in units of 64 */\n-\trlan_ctx.lrxqthresh = 1;\n-\n-\t /* Enable Flexible Descriptors in the queue context which\n-\t  * allows this driver to select a specific receive descriptor format\n-\t  */\n-\tregval = rd32(hw, QRXFLXP_CNTXT(pf_q));\n-\tregval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &\n-\t\tQRXFLXP_CNTXT_RXDID_IDX_M;\n-\n-\t/* increasing context priority to pick up profile id;\n-\t * default is 0x01; setting to 0x03 to ensure profile\n-\t * is programming if prev context is of same priority\n-\t */\n-\tregval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n-\t\tQRXFLXP_CNTXT_RXDID_PRIO_M;\n-\n-\twr32(hw, QRXFLXP_CNTXT(pf_q), regval);\n-\n-\t/* Absolute queue number out of 2K needs to be passed */\n-\terr = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);\n-\tif (err) {\n-\t\tdev_err(&vsi->back->pdev->dev,\n-\t\t\t\"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\\n\",\n-\t\t\tpf_q, err);\n-\t\treturn -EIO;\n-\t}\n-\n-\t/* init queue specific tail register */\n-\tring->tail = hw->hw_addr + QRX_TAIL(pf_q);\n-\twritel(0, ring->tail);\n-\tice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));\n-\n-\treturn 0;\n-}\n-\n-/**\n- * ice_vsi_cfg_rxqs - Configure the VSI for Rx\n- * @vsi: the VSI being configured\n- *\n- * Return 0 on success and a negative value on error\n- * Configure the Rx VSI for operation.\n- */\n-static int ice_vsi_cfg_rxqs(struct ice_vsi 
*vsi)\n-{\n-\tint err = 0;\n-\tu16 i;\n-\n-\tif (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)\n-\t\tvsi->max_frame = vsi->netdev->mtu +\n-\t\t\tETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;\n-\telse\n-\t\tvsi->max_frame = ICE_RXBUF_2048;\n-\n-\tvsi->rx_buf_len = ICE_RXBUF_2048;\n-\t/* set up individual rings */\n-\tfor (i = 0; i < vsi->num_rxq && !err; i++)\n-\t\terr = ice_setup_rx_ctx(vsi->rx_rings[i]);\n-\n-\tif (err) {\n-\t\tdev_err(&vsi->back->pdev->dev, \"ice_setup_rx_ctx failed\\n\");\n-\t\treturn -EIO;\n-\t}\n-\treturn err;\n-}\n-\n /**\n  * ice_vsi_cfg - Setup the VSI\n  * @vsi: the VSI being configured\n@@ -4232,207 +3919,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)\n \treturn err;\n }\n \n-/**\n- * ice_vsi_stop_tx_rings - Disable Tx rings\n- * @vsi: the VSI being configured\n- */\n-static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tenum ice_status status;\n-\tu32 *q_teids, val;\n-\tu16 *q_ids, i;\n-\tint err = 0;\n-\n-\tif (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)\n-\t\treturn -EINVAL;\n-\n-\tq_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),\n-\t\t\t       GFP_KERNEL);\n-\tif (!q_teids)\n-\t\treturn -ENOMEM;\n-\n-\tq_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),\n-\t\t\t     GFP_KERNEL);\n-\tif (!q_ids) {\n-\t\terr = -ENOMEM;\n-\t\tgoto err_alloc_q_ids;\n-\t}\n-\n-\t/* set up the tx queue list to be disabled */\n-\tice_for_each_txq(vsi, i) {\n-\t\tu16 v_idx;\n-\n-\t\tif (!vsi->tx_rings || !vsi->tx_rings[i]) {\n-\t\t\terr = -EINVAL;\n-\t\t\tgoto err_out;\n-\t\t}\n-\n-\t\tq_ids[i] = vsi->txq_map[i];\n-\t\tq_teids[i] = vsi->tx_rings[i]->txq_teid;\n-\n-\t\t/* clear cause_ena bit for disabled queues */\n-\t\tval = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));\n-\t\tval &= ~QINT_TQCTL_CAUSE_ENA_M;\n-\t\twr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);\n-\n-\t\t/* software is expected to wait for 100 ns */\n-\t\tndelay(100);\n-\n-\t\t/* trigger 
a software interrupt for the vector associated to\n-\t\t * the queue to schedule napi handler\n-\t\t */\n-\t\tv_idx = vsi->tx_rings[i]->q_vector->v_idx;\n-\t\twr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),\n-\t\t     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);\n-\t}\n-\tstatus = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,\n-\t\t\t\t NULL);\n-\t/* if the disable queue command was exercised during an active reset\n-\t * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as\n-\t * the reset operation disables queues at the hardware level anyway.\n-\t */\n-\tif (status == ICE_ERR_RESET_ONGOING) {\n-\t\tdev_dbg(&pf->pdev->dev,\n-\t\t\t\"Reset in progress. LAN Tx queues already disabled\\n\");\n-\t} else if (status) {\n-\t\tdev_err(&pf->pdev->dev,\n-\t\t\t\"Failed to disable LAN Tx queues, error: %d\\n\",\n-\t\t\tstatus);\n-\t\terr = -ENODEV;\n-\t}\n-\n-err_out:\n-\tdevm_kfree(&pf->pdev->dev, q_ids);\n-\n-err_alloc_q_ids:\n-\tdevm_kfree(&pf->pdev->dev, q_teids);\n-\n-\treturn err;\n-}\n-\n-/**\n- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled\n- * @pf: the PF being configured\n- * @pf_q: the PF queue\n- * @ena: enable or disable state of the queue\n- *\n- * This routine will wait for the given Rx queue of the PF to reach the\n- * enabled or disabled state.\n- * Returns -ETIMEDOUT in case of failing to reach the requested state after\n- * multiple retries; else will return 0 in case of success.\n- */\n-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)\n-{\n-\tint i;\n-\n-\tfor (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {\n-\t\tu32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));\n-\n-\t\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n-\t\t\tbreak;\n-\n-\t\tusleep_range(10, 20);\n-\t}\n-\tif (i >= ICE_Q_WAIT_RETRY_LIMIT)\n-\t\treturn -ETIMEDOUT;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings\n- * @vsi: the VSI being configured\n- * @ena: start or stop the 
rx rings\n- */\n-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tint i, j, ret = 0;\n-\n-\tfor (i = 0; i < vsi->num_rxq; i++) {\n-\t\tint pf_q = vsi->rxq_map[i];\n-\t\tu32 rx_reg;\n-\n-\t\tfor (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {\n-\t\t\trx_reg = rd32(hw, QRX_CTRL(pf_q));\n-\t\t\tif (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==\n-\t\t\t    ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))\n-\t\t\t\tbreak;\n-\t\t\tusleep_range(1000, 2000);\n-\t\t}\n-\n-\t\t/* Skip if the queue is already in the requested state */\n-\t\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n-\t\t\tcontinue;\n-\n-\t\t/* turn on/off the queue */\n-\t\tif (ena)\n-\t\t\trx_reg |= QRX_CTRL_QENA_REQ_M;\n-\t\telse\n-\t\t\trx_reg &= ~QRX_CTRL_QENA_REQ_M;\n-\t\twr32(hw, QRX_CTRL(pf_q), rx_reg);\n-\n-\t\t/* wait for the change to finish */\n-\t\tret = ice_pf_rxq_wait(pf, pf_q, ena);\n-\t\tif (ret) {\n-\t\t\tdev_err(&pf->pdev->dev,\n-\t\t\t\t\"VSI idx %d Rx ring %d %sable timeout\\n\",\n-\t\t\t\tvsi->idx, pf_q, (ena ? 
\"en\" : \"dis\"));\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\treturn ret;\n-}\n-\n-/**\n- * ice_vsi_start_rx_rings - start VSI's rx rings\n- * @vsi: the VSI whose rings are to be started\n- *\n- * Returns 0 on success and a negative value on error\n- */\n-static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)\n-{\n-\treturn ice_vsi_ctrl_rx_rings(vsi, true);\n-}\n-\n-/**\n- * ice_vsi_stop_rx_rings - stop VSI's rx rings\n- * @vsi: the VSI\n- *\n- * Returns 0 on success and a negative value on error\n- */\n-static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)\n-{\n-\treturn ice_vsi_ctrl_rx_rings(vsi, false);\n-}\n-\n-/**\n- * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings\n- * @vsi: the VSI\n- * Returns 0 on success and a negative value on error\n- */\n-static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)\n-{\n-\tint err_tx, err_rx;\n-\n-\terr_tx = ice_vsi_stop_tx_rings(vsi);\n-\tif (err_tx)\n-\t\tdev_dbg(&vsi->back->pdev->dev, \"Failed to disable Tx rings\\n\");\n-\n-\terr_rx = ice_vsi_stop_rx_rings(vsi);\n-\tif (err_rx)\n-\t\tdev_dbg(&vsi->back->pdev->dev, \"Failed to disable Rx rings\\n\");\n-\n-\tif (err_tx || err_rx)\n-\t\treturn -EIO;\n-\n-\treturn 0;\n-}\n-\n /**\n  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI\n  * @vsi: the VSI being configured\n@@ -4846,7 +4332,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)\n  */\n int ice_down(struct ice_vsi *vsi)\n {\n-\tint i, err;\n+\tint i, tx_err, rx_err;\n \n \t/* Caller of this function is expected to set the\n \t * vsi->state __ICE_DOWN bit\n@@ -4857,7 +4343,18 @@ int ice_down(struct ice_vsi *vsi)\n \t}\n \n \tice_vsi_dis_irq(vsi);\n-\terr = ice_vsi_stop_tx_rx_rings(vsi);\n+\ttx_err = ice_vsi_stop_tx_rings(vsi);\n+\tif (tx_err)\n+\t\tnetdev_err(vsi->netdev,\n+\t\t\t   \"Failed stop tx rings, VSI %d error %d\\n\",\n+\t\t\t   vsi->vsi_num, tx_err);\n+\n+\trx_err = ice_vsi_stop_rx_rings(vsi);\n+\tif (rx_err)\n+\t\tnetdev_err(vsi->netdev,\n+\t\t\t   \"Failed stop rx rings, VSI %d 
error %d\\n\",\n+\t\t\t   vsi->vsi_num, rx_err);\n+\n \tice_napi_disable_all(vsi);\n \n \tice_for_each_txq(vsi, i)\n@@ -4866,10 +4363,14 @@ int ice_down(struct ice_vsi *vsi)\n \tice_for_each_rxq(vsi, i)\n \t\tice_clean_rx_ring(vsi->rx_rings[i]);\n \n-\tif (err)\n-\t\tnetdev_err(vsi->netdev, \"Failed to close VSI 0x%04X on switch 0x%04X\\n\",\n+\tif (tx_err || rx_err) {\n+\t\tnetdev_err(vsi->netdev,\n+\t\t\t   \"Failed to close VSI 0x%04X on switch 0x%04X\\n\",\n \t\t\t   vsi->vsi_num, vsi->vsw->sw_id);\n-\treturn err;\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n }\n \n /**\n@@ -4889,6 +4390,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)\n \t}\n \n \tice_for_each_txq(vsi, i) {\n+\t\tvsi->tx_rings[i]->netdev = vsi->netdev;\n \t\terr = ice_setup_tx_ring(vsi->tx_rings[i]);\n \t\tif (err)\n \t\t\tbreak;\n@@ -4914,6 +4416,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)\n \t}\n \n \tice_for_each_rxq(vsi, i) {\n+\t\tvsi->rx_rings[i]->netdev = vsi->netdev;\n \t\terr = ice_setup_rx_ring(vsi->rx_rings[i]);\n \t\tif (err)\n \t\t\tbreak;\n",
    "prefixes": [
        "02/16"
    ]
}