Patch Detail
get:
Show a patch.
patch:
Update a patch (partial update: only the supplied fields change).
put:
Update a patch (full update: a complete representation is expected).
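
The GET request and response below illustrate the resource schema. For reference, the same request from Python might look like the following minimal sketch; the requests library is an assumption (any HTTP client works), and GET is assumed to need no authentication on this public instance:

import requests

# Fetch the patch shown below from the Patchwork REST API.
resp = requests.get("http://patchwork.ozlabs.org/api/patches/1178243/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[S30,v2,1/9] ice: Introduce ice_base.c"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for piping into `git am`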
GET /api/patches/1178243/?format=api
{ "id": 1178243, "url": "http://patchwork.ozlabs.org/api/patches/1178243/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191016150201.41597-1-anthony.l.nguyen@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20191016150201.41597-1-anthony.l.nguyen@intel.com>", "list_archive_url": null, "date": "2019-10-16T15:01:53", "name": "[S30,v2,1/9] ice: Introduce ice_base.c", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "bb4f4197ac37989b18cddde2d1ec40675a7ecf1b", "submitter": { "id": 68875, "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api", "name": "Tony Nguyen", "email": "anthony.l.nguyen@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191016150201.41597-1-anthony.l.nguyen@intel.com/mbox/", "series": [ { "id": 136670, "url": "http://patchwork.ozlabs.org/api/series/136670/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=136670", "date": "2019-10-16T15:01:54", "name": "[S30,v2,1/9] ice: Introduce ice_base.c", "version": 2, "mbox": "http://patchwork.ozlabs.org/series/136670/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1178243/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1178243/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.138;\n\thelo=whitealder.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 46tpVw0K37z9sNx\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 17 Oct 2019 10:32:40 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 75E4B87C05;\n\tWed, 16 Oct 2019 23:32:38 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id eqKywN+LSSui; Wed, 16 Oct 2019 23:32:19 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id C9F4487BE5;\n\tWed, 16 Oct 2019 23:32:15 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 183771BF5A1\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 16 Oct 2019 23:32:13 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby 
hemlock.osuosl.org (Postfix) with ESMTP id 05D708786C\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 16 Oct 2019 23:32:13 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id L74fDG8rgFFg for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 16 Oct 2019 23:32:07 +0000 (UTC)", "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id AEA86877E1\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 16 Oct 2019 23:32:07 +0000 (UTC)", "from orsmga007.jf.intel.com ([10.7.209.58])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t16 Oct 2019 16:32:07 -0700", "from unknown (HELO localhost.jf.intel.com) ([10.166.244.174])\n\tby orsmga007.jf.intel.com with ESMTP; 16 Oct 2019 16:32:06 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.67,305,1566889200\"; d=\"scan'208\";a=\"186310789\"", "From": "Tony Nguyen <anthony.l.nguyen@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Wed, 16 Oct 2019 08:01:53 -0700", "Message-Id": "<20191016150201.41597-1-anthony.l.nguyen@intel.com>", "X-Mailer": "git-send-email 2.20.1", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH S30 v2 1/9] ice: Introduce ice_base.c", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n\nRemove a few uses of kernel configuration flags from ice_lib.c by\nintroducing a new source file ice_base.c. 
Also move corresponding\nfunction prototypes from ice_lib.h to ice_base.h and include ice_base.h\nwhere required.\n\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\n drivers/net/ethernet/intel/ice/Makefile | 1 +\n drivers/net/ethernet/intel/ice/ice.h | 8 +\n drivers/net/ethernet/intel/ice/ice_base.c | 767 +++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_base.h | 31 +\n drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 1 +\n drivers/net/ethernet/intel/ice/ice_lib.c | 787 +-----------------\n drivers/net/ethernet/intel/ice/ice_lib.h | 39 -\n drivers/net/ethernet/intel/ice/ice_main.c | 1 +\n drivers/net/ethernet/intel/ice/ice_txrx.h | 2 -\n .../net/ethernet/intel/ice/ice_virtchnl_pf.c | 1 +\n 10 files changed, 811 insertions(+), 827 deletions(-)\n create mode 100644 drivers/net/ethernet/intel/ice/ice_base.c\n create mode 100644 drivers/net/ethernet/intel/ice/ice_base.h", "diff": "diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile\nindex 2f0ba4aa4957..ff737f880c12 100644\n--- a/drivers/net/ethernet/intel/ice/Makefile\n+++ b/drivers/net/ethernet/intel/ice/Makefile\n@@ -13,6 +13,7 @@ ice-y := ice_main.o\t\\\n \t ice_nvm.o\t\\\n \t ice_switch.o\t\\\n \t ice_sched.o\t\\\n+\t ice_base.o\t\\\n \t ice_lib.o\t\\\n \t ice_txrx.o\t\\\n \t ice_flex_pipe.o\t\\\ndiff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex b8f2a6e26d0f..42eaae2f356d 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -130,6 +130,14 @@ extern const char ice_drv_ver[];\n \t\t\t\t ICE_PROMISC_VLAN_TX | \\\n \t\t\t\t ICE_PROMISC_VLAN_RX)\n \n+struct ice_txq_meta {\n+\tu32 q_teid;\t/* Tx-scheduler element identifier */\n+\tu16 q_id;\t/* Entry in VSI's txq_map bitmap */\n+\tu16 q_handle;\t/* Relative index of Tx queue within TC */\n+\tu16 vsi_idx;\t/* VSI index that Tx queue belongs to */\n+\tu8 tc;\t\t/* TC number that Tx queue belongs to */\n+};\n+\n struct ice_tc_info {\n \tu16 qoffset;\n \tu16 qcount_tx;\ndiff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c\nnew file mode 100644\nindex 000000000000..735922a4d632\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_base.c\n@@ -0,0 +1,767 @@\n+// SPDX-License-Identifier: GPL-2.0\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#include \"ice_base.h\"\n+#include \"ice_dcb_lib.h\"\n+\n+/**\n+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI\n+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment\n+ *\n+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap\n+ */\n+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)\n+{\n+\tint offset, i;\n+\n+\tmutex_lock(qs_cfg->qs_mutex);\n+\toffset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,\n+\t\t\t\t\t 0, qs_cfg->q_count, 0);\n+\tif (offset >= qs_cfg->pf_map_size) {\n+\t\tmutex_unlock(qs_cfg->qs_mutex);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tbitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);\n+\tfor (i = 0; i < qs_cfg->q_count; i++)\n+\t\tqs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;\n+\tmutex_unlock(qs_cfg->qs_mutex);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI\n+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment\n+ *\n+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap\n+ */\n+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)\n+{\n+\tint i, index = 0;\n+\n+\tmutex_lock(qs_cfg->qs_mutex);\n+\tfor (i = 0; i < qs_cfg->q_count; i++) {\n+\t\tindex = find_next_zero_bit(qs_cfg->pf_map,\n+\t\t\t\t\t qs_cfg->pf_map_size, index);\n+\t\tif (index >= qs_cfg->pf_map_size)\n+\t\t\tgoto err_scatter;\n+\t\tset_bit(index, qs_cfg->pf_map);\n+\t\tqs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;\n+\t}\n+\tmutex_unlock(qs_cfg->qs_mutex);\n+\n+\treturn 0;\n+err_scatter:\n+\tfor (index = 0; index < i; index++) {\n+\t\tclear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);\n+\t\tqs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;\n+\t}\n+\tmutex_unlock(qs_cfg->qs_mutex);\n+\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled\n+ * @pf: the PF being configured\n+ * @pf_q: the PF queue\n+ * @ena: enable or disable state of the queue\n+ *\n+ * This routine will wait for the given Rx queue of the PF to reach the\n+ * enabled or disabled state.\n+ * Returns -ETIMEDOUT in case of failing to reach the requested state after\n+ * multiple retries; else will return 0 in case of success.\n+ */\n+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {\n+\t\tif (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &\n+\t\t\t QRX_CTRL_QENA_STAT_M))\n+\t\t\treturn 0;\n+\n+\t\tusleep_range(20, 40);\n+\t}\n+\n+\treturn -ETIMEDOUT;\n+}\n+\n+/**\n+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector\n+ * @vsi: the VSI being configured\n+ * @v_idx: index of the vector in the VSI struct\n+ *\n+ * We allocate one q_vector. If allocation fails we return -ENOMEM.\n+ */\n+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_q_vector *q_vector;\n+\n+\t/* allocate q_vector */\n+\tq_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);\n+\tif (!q_vector)\n+\t\treturn -ENOMEM;\n+\n+\tq_vector->vsi = vsi;\n+\tq_vector->v_idx = v_idx;\n+\tif (vsi->type == ICE_VSI_VF)\n+\t\tgoto out;\n+\t/* only set affinity_mask if the CPU is online */\n+\tif (cpu_online(v_idx))\n+\t\tcpumask_set_cpu(v_idx, &q_vector->affinity_mask);\n+\n+\t/* This will not be called in the driver load path because the netdev\n+\t * will not be created yet. All other cases with register the NAPI\n+\t * handler here (i.e. 
resume, reset/rebuild, etc.)\n+\t */\n+\tif (vsi->netdev)\n+\t\tnetif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,\n+\t\t\t NAPI_POLL_WEIGHT);\n+\n+out:\n+\t/* tie q_vector and VSI together */\n+\tvsi->q_vectors[v_idx] = q_vector;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector\n+ * @vsi: VSI having the memory freed\n+ * @v_idx: index of the vector to be freed\n+ */\n+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)\n+{\n+\tstruct ice_q_vector *q_vector;\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_ring *ring;\n+\n+\tif (!vsi->q_vectors[v_idx]) {\n+\t\tdev_dbg(&pf->pdev->dev, \"Queue vector at index %d not found\\n\",\n+\t\t\tv_idx);\n+\t\treturn;\n+\t}\n+\tq_vector = vsi->q_vectors[v_idx];\n+\n+\tice_for_each_ring(ring, q_vector->tx)\n+\t\tring->q_vector = NULL;\n+\tice_for_each_ring(ring, q_vector->rx)\n+\t\tring->q_vector = NULL;\n+\n+\t/* only VSI with an associated netdev is set up with NAPI */\n+\tif (vsi->netdev)\n+\t\tnetif_napi_del(&q_vector->napi);\n+\n+\tdevm_kfree(&pf->pdev->dev, q_vector);\n+\tvsi->q_vectors[v_idx] = NULL;\n+}\n+\n+/**\n+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set\n+ * @hw: board specific structure\n+ */\n+static void ice_cfg_itr_gran(struct ice_hw *hw)\n+{\n+\tu32 regval = rd32(hw, GLINT_CTL);\n+\n+\t/* no need to update global register if ITR gran is already set */\n+\tif (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&\n+\t (((regval & GLINT_CTL_ITR_GRAN_200_M) >>\n+\t GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&\n+\t (((regval & GLINT_CTL_ITR_GRAN_100_M) >>\n+\t GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&\n+\t (((regval & GLINT_CTL_ITR_GRAN_50_M) >>\n+\t GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&\n+\t (((regval & GLINT_CTL_ITR_GRAN_25_M) >>\n+\t GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))\n+\t\treturn;\n+\n+\tregval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &\n+\t\t GLINT_CTL_ITR_GRAN_200_M) |\n+\t\t ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &\n+\t\t GLINT_CTL_ITR_GRAN_100_M) |\n+\t\t ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &\n+\t\t GLINT_CTL_ITR_GRAN_50_M) |\n+\t\t ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &\n+\t\t GLINT_CTL_ITR_GRAN_25_M);\n+\twr32(hw, GLINT_CTL, regval);\n+}\n+\n+/**\n+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance\n+ * @ring: The Tx ring to configure\n+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized\n+ * @pf_q: queue index in the PF space\n+ *\n+ * Configure the Tx descriptor ring in TLAN context.\n+ */\n+static void\n+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)\n+{\n+\tstruct ice_vsi *vsi = ring->vsi;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\n+\ttlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;\n+\n+\ttlan_ctx->port_num = vsi->port_info->lport;\n+\n+\t/* Transmit Queue Length */\n+\ttlan_ctx->qlen = ring->count;\n+\n+\tice_set_cgd_num(tlan_ctx, ring);\n+\n+\t/* PF number */\n+\ttlan_ctx->pf_num = hw->pf_id;\n+\n+\t/* queue belongs to a specific VSI type\n+\t * VF / VM index should be programmed per vmvf_type setting:\n+\t * for vmvf_type = VF, it is VF number between 0-256\n+\t * for vmvf_type = VM, it is VM number between 0-767\n+\t * for PF or EMP this field should be set to zero\n+\t */\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_LB:\n+\t\t/* fall through */\n+\tcase ICE_VSI_PF:\n+\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n+\t\tbreak;\n+\tcase ICE_VSI_VF:\n+\t\t/* Firmware expects vmvf_num to be 
absolute VF ID */\n+\t\ttlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;\n+\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn;\n+\t}\n+\n+\t/* make sure the context is associated with the right VSI */\n+\ttlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);\n+\n+\ttlan_ctx->tso_ena = ICE_TX_LEGACY;\n+\ttlan_ctx->tso_qnum = pf_q;\n+\n+\t/* Legacy or Advanced Host Interface:\n+\t * 0: Advanced Host Interface\n+\t * 1: Legacy Host Interface\n+\t */\n+\ttlan_ctx->legacy_int = ICE_TX_LEGACY;\n+}\n+\n+/**\n+ * ice_setup_rx_ctx - Configure a receive ring context\n+ * @ring: The Rx ring to configure\n+ *\n+ * Configure the Rx descriptor ring in RLAN context.\n+ */\n+int ice_setup_rx_ctx(struct ice_ring *ring)\n+{\n+\tstruct ice_vsi *vsi = ring->vsi;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\tu32 rxdid = ICE_RXDID_FLEX_NIC;\n+\tstruct ice_rlan_ctx rlan_ctx;\n+\tu32 regval;\n+\tu16 pf_q;\n+\tint err;\n+\n+\t/* what is Rx queue number in global space of 2K Rx queues */\n+\tpf_q = vsi->rxq_map[ring->q_index];\n+\n+\t/* clear the context structure first */\n+\tmemset(&rlan_ctx, 0, sizeof(rlan_ctx));\n+\n+\trlan_ctx.base = ring->dma >> 7;\n+\n+\trlan_ctx.qlen = ring->count;\n+\n+\t/* Receive Packet Data Buffer Size.\n+\t * The Packet Data Buffer Size is defined in 128 byte units.\n+\t */\n+\trlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n+\n+\t/* use 32 byte descriptors */\n+\trlan_ctx.dsize = 1;\n+\n+\t/* Strip the Ethernet CRC bytes before the packet is posted to host\n+\t * memory.\n+\t */\n+\trlan_ctx.crcstrip = 1;\n+\n+\t/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */\n+\trlan_ctx.l2tsel = 1;\n+\n+\trlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;\n+\trlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;\n+\trlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;\n+\n+\t/* This controls whether VLAN is stripped from inner headers\n+\t * The VLAN in the inner L2 header is stripped to the receive\n+\t * descriptor if enabled by this flag.\n+\t */\n+\trlan_ctx.showiv = 0;\n+\n+\t/* Max packet size for this queue - must not be set to a larger value\n+\t * than 5 x DBUF\n+\t */\n+\trlan_ctx.rxmax = min_t(u16, vsi->max_frame,\n+\t\t\t ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);\n+\n+\t/* Rx queue threshold in units of 64 */\n+\trlan_ctx.lrxqthresh = 1;\n+\n+\t /* Enable Flexible Descriptors in the queue context which\n+\t * allows this driver to select a specific receive descriptor format\n+\t */\n+\tif (vsi->type != ICE_VSI_VF) {\n+\t\tregval = rd32(hw, QRXFLXP_CNTXT(pf_q));\n+\t\tregval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &\n+\t\t\tQRXFLXP_CNTXT_RXDID_IDX_M;\n+\n+\t\t/* increasing context priority to pick up profile ID;\n+\t\t * default is 0x01; setting to 0x03 to ensure profile\n+\t\t * is programming if prev context is of same priority\n+\t\t */\n+\t\tregval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n+\t\t\tQRXFLXP_CNTXT_RXDID_PRIO_M;\n+\n+\t\twr32(hw, QRXFLXP_CNTXT(pf_q), regval);\n+\t}\n+\n+\t/* Absolute queue number out of 2K needs to be passed */\n+\terr = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);\n+\tif (err) {\n+\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\\n\",\n+\t\t\tpf_q, err);\n+\t\treturn -EIO;\n+\t}\n+\n+\tif (vsi->type == ICE_VSI_VF)\n+\t\treturn 0;\n+\n+\t/* init queue specific tail register */\n+\tring->tail = hw->hw_addr + QRX_TAIL(pf_q);\n+\twritel(0, ring->tail);\n+\tice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));\n+\n+\treturn 
0;\n+}\n+\n+/**\n+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI\n+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment\n+ *\n+ * This function first tries to find contiguous space. If it is not successful,\n+ * it tries with the scatter approach.\n+ *\n+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap\n+ */\n+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)\n+{\n+\tint ret = 0;\n+\n+\tret = __ice_vsi_get_qs_contig(qs_cfg);\n+\tif (ret) {\n+\t\t/* contig failed, so try with scatter approach */\n+\t\tqs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;\n+\t\tqs_cfg->q_count = min_t(u16, qs_cfg->q_count,\n+\t\t\t\t\tqs_cfg->scatter_count);\n+\t\tret = __ice_vsi_get_qs_sc(qs_cfg);\n+\t}\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring\n+ * @vsi: the VSI being configured\n+ * @ena: start or stop the Rx rings\n+ * @rxq_idx: Rx queue index\n+ */\n+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)\n+{\n+\tint pf_q = vsi->rxq_map[rxq_idx];\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint ret = 0;\n+\tu32 rx_reg;\n+\n+\trx_reg = rd32(hw, QRX_CTRL(pf_q));\n+\n+\t/* Skip if the queue is already in the requested state */\n+\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n+\t\treturn 0;\n+\n+\t/* turn on/off the queue */\n+\tif (ena)\n+\t\trx_reg |= QRX_CTRL_QENA_REQ_M;\n+\telse\n+\t\trx_reg &= ~QRX_CTRL_QENA_REQ_M;\n+\twr32(hw, QRX_CTRL(pf_q), rx_reg);\n+\n+\t/* wait for the change to finish */\n+\tret = ice_pf_rxq_wait(pf, pf_q, ena);\n+\tif (ret)\n+\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\"VSI idx %d Rx ring %d %sable timeout\\n\",\n+\t\t\tvsi->idx, pf_q, (ena ? \"en\" : \"dis\"));\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors\n+ * @vsi: the VSI being configured\n+ *\n+ * We allocate one q_vector per queue interrupt. If allocation fails we\n+ * return -ENOMEM.\n+ */\n+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint v_idx = 0, num_q_vectors;\n+\tint err;\n+\n+\tif (vsi->q_vectors[0]) {\n+\t\tdev_dbg(&pf->pdev->dev, \"VSI %d has existing q_vectors\\n\",\n+\t\t\tvsi->vsi_num);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tnum_q_vectors = vsi->num_q_vectors;\n+\n+\tfor (v_idx = 0; v_idx < num_q_vectors; v_idx++) {\n+\t\terr = ice_vsi_alloc_q_vector(vsi, v_idx);\n+\t\tif (err)\n+\t\t\tgoto err_out;\n+\t}\n+\n+\treturn 0;\n+\n+err_out:\n+\twhile (v_idx--)\n+\t\tice_free_q_vector(vsi, v_idx);\n+\n+\tdev_err(&pf->pdev->dev,\n+\t\t\"Failed to allocate %d q_vector for VSI %d, ret=%d\\n\",\n+\t\tvsi->num_q_vectors, vsi->vsi_num, err);\n+\tvsi->num_q_vectors = 0;\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors\n+ * @vsi: the VSI being configured\n+ *\n+ * This function maps descriptor rings to the queue-specific vectors allotted\n+ * through the MSI-X enabling code. 
On a constrained vector budget, we map Tx\n+ * and Rx rings to the vector as \"efficiently\" as possible.\n+ */\n+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)\n+{\n+\tint q_vectors = vsi->num_q_vectors;\n+\tint tx_rings_rem, rx_rings_rem;\n+\tint v_id;\n+\n+\t/* initially assigning remaining rings count to VSIs num queue value */\n+\ttx_rings_rem = vsi->num_txq;\n+\trx_rings_rem = vsi->num_rxq;\n+\n+\tfor (v_id = 0; v_id < q_vectors; v_id++) {\n+\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[v_id];\n+\t\tint tx_rings_per_v, rx_rings_per_v, q_id, q_base;\n+\n+\t\t/* Tx rings mapping to vector */\n+\t\ttx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);\n+\t\tq_vector->num_ring_tx = tx_rings_per_v;\n+\t\tq_vector->tx.ring = NULL;\n+\t\tq_vector->tx.itr_idx = ICE_TX_ITR;\n+\t\tq_base = vsi->num_txq - tx_rings_rem;\n+\n+\t\tfor (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {\n+\t\t\tstruct ice_ring *tx_ring = vsi->tx_rings[q_id];\n+\n+\t\t\ttx_ring->q_vector = q_vector;\n+\t\t\ttx_ring->next = q_vector->tx.ring;\n+\t\t\tq_vector->tx.ring = tx_ring;\n+\t\t}\n+\t\ttx_rings_rem -= tx_rings_per_v;\n+\n+\t\t/* Rx rings mapping to vector */\n+\t\trx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);\n+\t\tq_vector->num_ring_rx = rx_rings_per_v;\n+\t\tq_vector->rx.ring = NULL;\n+\t\tq_vector->rx.itr_idx = ICE_RX_ITR;\n+\t\tq_base = vsi->num_rxq - rx_rings_rem;\n+\n+\t\tfor (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {\n+\t\t\tstruct ice_ring *rx_ring = vsi->rx_rings[q_id];\n+\n+\t\t\trx_ring->q_vector = q_vector;\n+\t\t\trx_ring->next = q_vector->rx.ring;\n+\t\t\tq_vector->rx.ring = rx_ring;\n+\t\t}\n+\t\trx_rings_rem -= rx_rings_per_v;\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors\n+ * @vsi: the VSI having memory freed\n+ */\n+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)\n+{\n+\tint v_idx;\n+\n+\tice_for_each_q_vector(vsi, v_idx)\n+\t\tice_free_q_vector(vsi, v_idx);\n+}\n+\n+/**\n+ * ice_vsi_cfg_txq - Configure single Tx queue\n+ * @vsi: the VSI that queue belongs to\n+ * @ring: Tx ring to be configured\n+ * @tc_q_idx: queue index within given TC\n+ * @qg_buf: queue group buffer\n+ * @tc: TC that Tx ring belongs to\n+ */\n+int\n+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,\n+\t\tstruct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)\n+{\n+\tstruct ice_tlan_ctx tlan_ctx = { 0 };\n+\tstruct ice_aqc_add_txqs_perq *txq;\n+\tstruct ice_pf *pf = vsi->back;\n+\tu8 buf_len = sizeof(*qg_buf);\n+\tenum ice_status status;\n+\tu16 pf_q;\n+\n+\tpf_q = ring->reg_idx;\n+\tice_setup_tx_ctx(ring, &tlan_ctx, pf_q);\n+\t/* copy context contents into the qg_buf */\n+\tqg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);\n+\tice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,\n+\t\t ice_tlan_ctx_info);\n+\n+\t/* init queue specific tail reg. It is referred as\n+\t * transmit comm scheduler queue doorbell.\n+\t */\n+\tring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);\n+\n+\t/* Add unique software queue handle of the Tx queue per\n+\t * TC into the VSI Tx ring\n+\t */\n+\tring->q_handle = tc_q_idx;\n+\n+\tstatus = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,\n+\t\t\t\t 1, qg_buf, buf_len, NULL);\n+\tif (status) {\n+\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\"Failed to set LAN Tx queue context, error: %d\\n\",\n+\t\t\tstatus);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\t/* Add Tx Queue TEID into the VSI Tx ring from the\n+\t * response. 
This will complete configuring and\n+\t * enabling the queue.\n+\t */\n+\ttxq = &qg_buf->txqs[0];\n+\tif (pf_q == le16_to_cpu(txq->txq_id))\n+\t\tring->txq_teid = le32_to_cpu(txq->q_teid);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_cfg_itr - configure the initial interrupt throttle values\n+ * @hw: pointer to the HW structure\n+ * @q_vector: interrupt vector that's being configured\n+ *\n+ * Configure interrupt throttling values for the ring containers that are\n+ * associated with the interrupt vector passed in.\n+ */\n+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)\n+{\n+\tice_cfg_itr_gran(hw);\n+\n+\tif (q_vector->num_ring_rx) {\n+\t\tstruct ice_ring_container *rc = &q_vector->rx;\n+\n+\t\t/* if this value is set then don't overwrite with default */\n+\t\tif (!rc->itr_setting)\n+\t\t\trc->itr_setting = ICE_DFLT_RX_ITR;\n+\n+\t\trc->target_itr = ITR_TO_REG(rc->itr_setting);\n+\t\trc->next_update = jiffies + 1;\n+\t\trc->current_itr = rc->target_itr;\n+\t\twr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),\n+\t\t ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);\n+\t}\n+\n+\tif (q_vector->num_ring_tx) {\n+\t\tstruct ice_ring_container *rc = &q_vector->tx;\n+\n+\t\t/* if this value is set then don't overwrite with default */\n+\t\tif (!rc->itr_setting)\n+\t\t\trc->itr_setting = ICE_DFLT_TX_ITR;\n+\n+\t\trc->target_itr = ITR_TO_REG(rc->itr_setting);\n+\t\trc->next_update = jiffies + 1;\n+\t\trc->current_itr = rc->target_itr;\n+\t\twr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),\n+\t\t ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);\n+\t}\n+}\n+\n+/**\n+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue\n+ * @vsi: the VSI being configured\n+ * @txq: Tx queue being mapped to MSI-X vector\n+ * @msix_idx: MSI-X vector index within the function\n+ * @itr_idx: ITR index of the interrupt cause\n+ *\n+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector\n+ * within the function space.\n+ */\n+void\n+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 val;\n+\n+\titr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;\n+\n+\tval = QINT_TQCTL_CAUSE_ENA_M | itr_idx |\n+\t ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);\n+\n+\twr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);\n+}\n+\n+/**\n+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue\n+ * @vsi: the VSI being configured\n+ * @rxq: Rx queue being mapped to MSI-X vector\n+ * @msix_idx: MSI-X vector index within the function\n+ * @itr_idx: ITR index of the interrupt cause\n+ *\n+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector\n+ * within the function space.\n+ */\n+void\n+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 val;\n+\n+\titr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;\n+\n+\tval = QINT_RQCTL_CAUSE_ENA_M | itr_idx |\n+\t ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);\n+\n+\twr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);\n+\n+\tice_flush(hw);\n+}\n+\n+/**\n+ * ice_trigger_sw_intr - trigger a software interrupt\n+ * @hw: pointer to the HW structure\n+ * @q_vector: interrupt vector to trigger the software interrupt for\n+ */\n+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)\n+{\n+\twr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),\n+\t (ICE_ITR_NONE << 
GLINT_DYN_CTL_ITR_INDX_S) |\n+\t GLINT_DYN_CTL_SWINT_TRIG_M |\n+\t GLINT_DYN_CTL_INTENA_M);\n+}\n+\n+/**\n+ * ice_vsi_stop_tx_ring - Disable single Tx ring\n+ * @vsi: the VSI being configured\n+ * @rst_src: reset source\n+ * @rel_vmvf_num: Relative ID of VF/VM\n+ * @ring: Tx ring to be stopped\n+ * @txq_meta: Meta data of Tx ring to be stopped\n+ */\n+int\n+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,\n+\t\t u16 rel_vmvf_num, struct ice_ring *ring,\n+\t\t struct ice_txq_meta *txq_meta)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_q_vector *q_vector;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tenum ice_status status;\n+\tu32 val;\n+\n+\t/* clear cause_ena bit for disabled queues */\n+\tval = rd32(hw, QINT_TQCTL(ring->reg_idx));\n+\tval &= ~QINT_TQCTL_CAUSE_ENA_M;\n+\twr32(hw, QINT_TQCTL(ring->reg_idx), val);\n+\n+\t/* software is expected to wait for 100 ns */\n+\tndelay(100);\n+\n+\t/* trigger a software interrupt for the vector\n+\t * associated to the queue to schedule NAPI handler\n+\t */\n+\tq_vector = ring->q_vector;\n+\tif (q_vector)\n+\t\tice_trigger_sw_intr(hw, q_vector);\n+\n+\tstatus = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,\n+\t\t\t\t txq_meta->tc, 1, &txq_meta->q_handle,\n+\t\t\t\t &txq_meta->q_id, &txq_meta->q_teid, rst_src,\n+\t\t\t\t rel_vmvf_num, NULL);\n+\n+\t/* if the disable queue command was exercised during an\n+\t * active reset flow, ICE_ERR_RESET_ONGOING is returned.\n+\t * This is not an error as the reset operation disables\n+\t * queues at the hardware level anyway.\n+\t */\n+\tif (status == ICE_ERR_RESET_ONGOING) {\n+\t\tdev_dbg(&vsi->back->pdev->dev,\n+\t\t\t\"Reset in progress. LAN Tx queues already disabled\\n\");\n+\t} else if (status == ICE_ERR_DOES_NOT_EXIST) {\n+\t\tdev_dbg(&vsi->back->pdev->dev,\n+\t\t\t\"LAN Tx queues do not exist, nothing to disable\\n\");\n+\t} else if (status) {\n+\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\"Failed to disable LAN Tx queues, error: %d\\n\", status);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_fill_txq_meta - Prepare the Tx queue's meta data\n+ * @vsi: VSI that ring belongs to\n+ * @ring: ring that txq_meta will be based on\n+ * @txq_meta: a helper struct that wraps Tx queue's information\n+ *\n+ * Set up a helper struct that will contain all the necessary fields that\n+ * are needed for stopping Tx queue\n+ */\n+void\n+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,\n+\t\t struct ice_txq_meta *txq_meta)\n+{\n+\tu8 tc;\n+\n+\tif (IS_ENABLED(CONFIG_DCB))\n+\t\ttc = ring->dcb_tc;\n+\telse\n+\t\ttc = 0;\n+\n+\ttxq_meta->q_id = ring->reg_idx;\n+\ttxq_meta->q_teid = ring->txq_teid;\n+\ttxq_meta->q_handle = ring->q_handle;\n+\ttxq_meta->vsi_idx = vsi->idx;\n+\ttxq_meta->tc = tc;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h\nnew file mode 100644\nindex 000000000000..db456862b35b\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_base.h\n@@ -0,0 +1,31 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#ifndef _ICE_BASE_H_\n+#define _ICE_BASE_H_\n+\n+#include \"ice.h\"\n+\n+int ice_setup_rx_ctx(struct ice_ring *ring);\n+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);\n+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);\n+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);\n+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);\n+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);\n+int\n+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,\n+\t\tstruct ice_aqc_add_tx_qgrp *qg_buf, u8 tc);\n+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);\n+void\n+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);\n+void\n+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);\n+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);\n+int\n+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,\n+\t\t u16 rel_vmvf_num, struct ice_ring *ring,\n+\t\t struct ice_txq_meta *txq_meta);\n+void\n+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,\n+\t\t struct ice_txq_meta *txq_meta);\n+#endif /* _ICE_BASE_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h\nindex 6c0585d1bc97..a2364492c49b 100644\n--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h\n+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h\n@@ -5,6 +5,7 @@\n #define _ICE_DCB_LIB_H_\n \n #include \"ice.h\"\n+#include \"ice_base.h\"\n #include \"ice_lib.h\"\n \n #ifdef CONFIG_DCB\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c\nindex 6d191711caee..fe7f43d2e734 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.c\n@@ -2,234 +2,10 @@\n /* Copyright (c) 2018, Intel Corporation. 
*/\n \n #include \"ice.h\"\n+#include \"ice_base.h\"\n #include \"ice_lib.h\"\n #include \"ice_dcb_lib.h\"\n \n-/**\n- * ice_setup_rx_ctx - Configure a receive ring context\n- * @ring: The Rx ring to configure\n- *\n- * Configure the Rx descriptor ring in RLAN context.\n- */\n-static int ice_setup_rx_ctx(struct ice_ring *ring)\n-{\n-\tstruct ice_vsi *vsi = ring->vsi;\n-\tstruct ice_hw *hw = &vsi->back->hw;\n-\tu32 rxdid = ICE_RXDID_FLEX_NIC;\n-\tstruct ice_rlan_ctx rlan_ctx;\n-\tu32 regval;\n-\tu16 pf_q;\n-\tint err;\n-\n-\t/* what is Rx queue number in global space of 2K Rx queues */\n-\tpf_q = vsi->rxq_map[ring->q_index];\n-\n-\t/* clear the context structure first */\n-\tmemset(&rlan_ctx, 0, sizeof(rlan_ctx));\n-\n-\trlan_ctx.base = ring->dma >> 7;\n-\n-\trlan_ctx.qlen = ring->count;\n-\n-\t/* Receive Packet Data Buffer Size.\n-\t * The Packet Data Buffer Size is defined in 128 byte units.\n-\t */\n-\trlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n-\n-\t/* use 32 byte descriptors */\n-\trlan_ctx.dsize = 1;\n-\n-\t/* Strip the Ethernet CRC bytes before the packet is posted to host\n-\t * memory.\n-\t */\n-\trlan_ctx.crcstrip = 1;\n-\n-\t/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */\n-\trlan_ctx.l2tsel = 1;\n-\n-\trlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;\n-\trlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;\n-\trlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;\n-\n-\t/* This controls whether VLAN is stripped from inner headers\n-\t * The VLAN in the inner L2 header is stripped to the receive\n-\t * descriptor if enabled by this flag.\n-\t */\n-\trlan_ctx.showiv = 0;\n-\n-\t/* Max packet size for this queue - must not be set to a larger value\n-\t * than 5 x DBUF\n-\t */\n-\trlan_ctx.rxmax = min_t(u16, vsi->max_frame,\n-\t\t\t ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);\n-\n-\t/* Rx queue threshold in units of 64 */\n-\trlan_ctx.lrxqthresh = 1;\n-\n-\t /* Enable Flexible Descriptors in the queue context which\n-\t * allows this driver to select a specific receive descriptor format\n-\t */\n-\tif (vsi->type != ICE_VSI_VF) {\n-\t\tregval = rd32(hw, QRXFLXP_CNTXT(pf_q));\n-\t\tregval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &\n-\t\t\tQRXFLXP_CNTXT_RXDID_IDX_M;\n-\n-\t\t/* increasing context priority to pick up profile ID;\n-\t\t * default is 0x01; setting to 0x03 to ensure profile\n-\t\t * is programming if prev context is of same priority\n-\t\t */\n-\t\tregval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n-\t\t\tQRXFLXP_CNTXT_RXDID_PRIO_M;\n-\n-\t\twr32(hw, QRXFLXP_CNTXT(pf_q), regval);\n-\t}\n-\n-\t/* Absolute queue number out of 2K needs to be passed */\n-\terr = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);\n-\tif (err) {\n-\t\tdev_err(&vsi->back->pdev->dev,\n-\t\t\t\"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\\n\",\n-\t\t\tpf_q, err);\n-\t\treturn -EIO;\n-\t}\n-\n-\tif (vsi->type == ICE_VSI_VF)\n-\t\treturn 0;\n-\n-\t/* init queue specific tail register */\n-\tring->tail = hw->hw_addr + QRX_TAIL(pf_q);\n-\twritel(0, ring->tail);\n-\tice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));\n-\n-\treturn 0;\n-}\n-\n-/**\n- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance\n- * @ring: The Tx ring to configure\n- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized\n- * @pf_q: queue index in the PF space\n- *\n- * Configure the Tx descriptor ring in TLAN context.\n- */\n-static void\n-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)\n-{\n-\tstruct ice_vsi *vsi = 
ring->vsi;\n-\tstruct ice_hw *hw = &vsi->back->hw;\n-\n-\ttlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;\n-\n-\ttlan_ctx->port_num = vsi->port_info->lport;\n-\n-\t/* Transmit Queue Length */\n-\ttlan_ctx->qlen = ring->count;\n-\n-\tice_set_cgd_num(tlan_ctx, ring);\n-\n-\t/* PF number */\n-\ttlan_ctx->pf_num = hw->pf_id;\n-\n-\t/* queue belongs to a specific VSI type\n-\t * VF / VM index should be programmed per vmvf_type setting:\n-\t * for vmvf_type = VF, it is VF number between 0-256\n-\t * for vmvf_type = VM, it is VM number between 0-767\n-\t * for PF or EMP this field should be set to zero\n-\t */\n-\tswitch (vsi->type) {\n-\tcase ICE_VSI_LB:\n-\t\t/* fall through */\n-\tcase ICE_VSI_PF:\n-\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n-\t\tbreak;\n-\tcase ICE_VSI_VF:\n-\t\t/* Firmware expects vmvf_num to be absolute VF ID */\n-\t\ttlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;\n-\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;\n-\t\tbreak;\n-\tdefault:\n-\t\treturn;\n-\t}\n-\n-\t/* make sure the context is associated with the right VSI */\n-\ttlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);\n-\n-\ttlan_ctx->tso_ena = ICE_TX_LEGACY;\n-\ttlan_ctx->tso_qnum = pf_q;\n-\n-\t/* Legacy or Advanced Host Interface:\n-\t * 0: Advanced Host Interface\n-\t * 1: Legacy Host Interface\n-\t */\n-\ttlan_ctx->legacy_int = ICE_TX_LEGACY;\n-}\n-\n-/**\n- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled\n- * @pf: the PF being configured\n- * @pf_q: the PF queue\n- * @ena: enable or disable state of the queue\n- *\n- * This routine will wait for the given Rx queue of the PF to reach the\n- * enabled or disabled state.\n- * Returns -ETIMEDOUT in case of failing to reach the requested state after\n- * multiple retries; else will return 0 in case of success.\n- */\n-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)\n-{\n-\tint i;\n-\n-\tfor (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {\n-\t\tif (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &\n-\t\t\t QRX_CTRL_QENA_STAT_M))\n-\t\t\treturn 0;\n-\n-\t\tusleep_range(20, 40);\n-\t}\n-\n-\treturn -ETIMEDOUT;\n-}\n-\n-/**\n- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring\n- * @vsi: the VSI being configured\n- * @ena: start or stop the Rx rings\n- * @rxq_idx: Rx queue index\n- */\n-#ifndef CONFIG_PCI_IOV\n-static\n-#endif /* !CONFIG_PCI_IOV */\n-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)\n-{\n-\tint pf_q = vsi->rxq_map[rxq_idx];\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tint ret = 0;\n-\tu32 rx_reg;\n-\n-\trx_reg = rd32(hw, QRX_CTRL(pf_q));\n-\n-\t/* Skip if the queue is already in the requested state */\n-\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n-\t\treturn 0;\n-\n-\t/* turn on/off the queue */\n-\tif (ena)\n-\t\trx_reg |= QRX_CTRL_QENA_REQ_M;\n-\telse\n-\t\trx_reg &= ~QRX_CTRL_QENA_REQ_M;\n-\twr32(hw, QRX_CTRL(pf_q), rx_reg);\n-\n-\t/* wait for the change to finish */\n-\tret = ice_pf_rxq_wait(pf, pf_q, ena);\n-\tif (ret)\n-\t\tdev_err(&pf->pdev->dev,\n-\t\t\t\"VSI idx %d Rx ring %d %sable timeout\\n\",\n-\t\t\tvsi->idx, pf_q, (ena ? \"en\" : \"dis\"));\n-\n-\treturn ret;\n-}\n-\n /**\n * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings\n * @vsi: the VSI being configured\n@@ -281,7 +57,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)\n \tif (!vsi->rxq_map)\n \t\tgoto err_rxq_map;\n \n-\n \t/* There is no need to allocate q_vectors for a loopback VSI. 
*/\n \tif (vsi->type == ICE_VSI_LB)\n \t\treturn 0;\n@@ -605,88 +380,6 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)\n \treturn vsi;\n }\n \n-/**\n- * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI\n- * @qs_cfg: gathered variables needed for PF->VSI queues assignment\n- *\n- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap\n- */\n-static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)\n-{\n-\tint offset, i;\n-\n-\tmutex_lock(qs_cfg->qs_mutex);\n-\toffset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,\n-\t\t\t\t\t 0, qs_cfg->q_count, 0);\n-\tif (offset >= qs_cfg->pf_map_size) {\n-\t\tmutex_unlock(qs_cfg->qs_mutex);\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tbitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);\n-\tfor (i = 0; i < qs_cfg->q_count; i++)\n-\t\tqs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;\n-\tmutex_unlock(qs_cfg->qs_mutex);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI\n- * @qs_cfg: gathered variables needed for pf->vsi queues assignment\n- *\n- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap\n- */\n-static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)\n-{\n-\tint i, index = 0;\n-\n-\tmutex_lock(qs_cfg->qs_mutex);\n-\tfor (i = 0; i < qs_cfg->q_count; i++) {\n-\t\tindex = find_next_zero_bit(qs_cfg->pf_map,\n-\t\t\t\t\t qs_cfg->pf_map_size, index);\n-\t\tif (index >= qs_cfg->pf_map_size)\n-\t\t\tgoto err_scatter;\n-\t\tset_bit(index, qs_cfg->pf_map);\n-\t\tqs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;\n-\t}\n-\tmutex_unlock(qs_cfg->qs_mutex);\n-\n-\treturn 0;\n-err_scatter:\n-\tfor (index = 0; index < i; index++) {\n-\t\tclear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);\n-\t\tqs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;\n-\t}\n-\tmutex_unlock(qs_cfg->qs_mutex);\n-\n-\treturn -ENOMEM;\n-}\n-\n-/**\n- * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI\n- * @qs_cfg: gathered variables needed for pf->vsi queues assignment\n- *\n- * This function first tries to find contiguous space. 
If it is not successful,\n- * it tries with the scatter approach.\n- *\n- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap\n- */\n-static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)\n-{\n-\tint ret = 0;\n-\n-\tret = __ice_vsi_get_qs_contig(qs_cfg);\n-\tif (ret) {\n-\t\t/* contig failed, so try with scatter approach */\n-\t\tqs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;\n-\t\tqs_cfg->q_count = min_t(u16, qs_cfg->q_count,\n-\t\t\t\t\tqs_cfg->scatter_count);\n-\t\tret = __ice_vsi_get_qs_sc(qs_cfg);\n-\t}\n-\treturn ret;\n-}\n-\n /**\n * ice_vsi_get_qs - Assign queues from PF to VSI\n * @vsi: the VSI to assign queues to\n@@ -1108,129 +801,6 @@ static int ice_vsi_init(struct ice_vsi *vsi)\n \treturn ret;\n }\n \n-/**\n- * ice_free_q_vector - Free memory allocated for a specific interrupt vector\n- * @vsi: VSI having the memory freed\n- * @v_idx: index of the vector to be freed\n- */\n-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)\n-{\n-\tstruct ice_q_vector *q_vector;\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_ring *ring;\n-\n-\tif (!vsi->q_vectors[v_idx]) {\n-\t\tdev_dbg(&pf->pdev->dev, \"Queue vector at index %d not found\\n\",\n-\t\t\tv_idx);\n-\t\treturn;\n-\t}\n-\tq_vector = vsi->q_vectors[v_idx];\n-\n-\tice_for_each_ring(ring, q_vector->tx)\n-\t\tring->q_vector = NULL;\n-\tice_for_each_ring(ring, q_vector->rx)\n-\t\tring->q_vector = NULL;\n-\n-\t/* only VSI with an associated netdev is set up with NAPI */\n-\tif (vsi->netdev)\n-\t\tnetif_napi_del(&q_vector->napi);\n-\n-\tdevm_kfree(&pf->pdev->dev, q_vector);\n-\tvsi->q_vectors[v_idx] = NULL;\n-}\n-\n-/**\n- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors\n- * @vsi: the VSI having memory freed\n- */\n-void ice_vsi_free_q_vectors(struct ice_vsi *vsi)\n-{\n-\tint v_idx;\n-\n-\tice_for_each_q_vector(vsi, v_idx)\n-\t\tice_free_q_vector(vsi, v_idx);\n-}\n-\n-/**\n- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector\n- * @vsi: the VSI being configured\n- * @v_idx: index of the vector in the VSI struct\n- *\n- * We allocate one q_vector. If allocation fails we return -ENOMEM.\n- */\n-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_q_vector *q_vector;\n-\n-\t/* allocate q_vector */\n-\tq_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);\n-\tif (!q_vector)\n-\t\treturn -ENOMEM;\n-\n-\tq_vector->vsi = vsi;\n-\tq_vector->v_idx = v_idx;\n-\tif (vsi->type == ICE_VSI_VF)\n-\t\tgoto out;\n-\t/* only set affinity_mask if the CPU is online */\n-\tif (cpu_online(v_idx))\n-\t\tcpumask_set_cpu(v_idx, &q_vector->affinity_mask);\n-\n-\t/* This will not be called in the driver load path because the netdev\n-\t * will not be created yet. All other cases with register the NAPI\n-\t * handler here (i.e. resume, reset/rebuild, etc.)\n-\t */\n-\tif (vsi->netdev)\n-\t\tnetif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,\n-\t\t\t NAPI_POLL_WEIGHT);\n-\n-out:\n-\t/* tie q_vector and VSI together */\n-\tvsi->q_vectors[v_idx] = q_vector;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors\n- * @vsi: the VSI being configured\n- *\n- * We allocate one q_vector per queue interrupt. 
If allocation fails we\n- * return -ENOMEM.\n- */\n-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tint v_idx = 0, num_q_vectors;\n-\tint err;\n-\n-\tif (vsi->q_vectors[0]) {\n-\t\tdev_dbg(&pf->pdev->dev, \"VSI %d has existing q_vectors\\n\",\n-\t\t\tvsi->vsi_num);\n-\t\treturn -EEXIST;\n-\t}\n-\n-\tnum_q_vectors = vsi->num_q_vectors;\n-\n-\tfor (v_idx = 0; v_idx < num_q_vectors; v_idx++) {\n-\t\terr = ice_vsi_alloc_q_vector(vsi, v_idx);\n-\t\tif (err)\n-\t\t\tgoto err_out;\n-\t}\n-\n-\treturn 0;\n-\n-err_out:\n-\twhile (v_idx--)\n-\t\tice_free_q_vector(vsi, v_idx);\n-\n-\tdev_err(&pf->pdev->dev,\n-\t\t\"Failed to allocate %d q_vector for VSI %d, ret=%d\\n\",\n-\t\tvsi->num_q_vectors, vsi->vsi_num, err);\n-\tvsi->num_q_vectors = 0;\n-\treturn err;\n-}\n-\n /**\n * ice_vsi_setup_vector_base - Set up the base vector for the given VSI\n * @vsi: ptr to the VSI\n@@ -1351,66 +921,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)\n \treturn -ENOMEM;\n }\n \n-/**\n- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors\n- * @vsi: the VSI being configured\n- *\n- * This function maps descriptor rings to the queue-specific vectors allotted\n- * through the MSI-X enabling code. On a constrained vector budget, we map Tx\n- * and Rx rings to the vector as \"efficiently\" as possible.\n- */\n-#ifdef CONFIG_DCB\n-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)\n-#else\n-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)\n-#endif /* CONFIG_DCB */\n-{\n-\tint q_vectors = vsi->num_q_vectors;\n-\tint tx_rings_rem, rx_rings_rem;\n-\tint v_id;\n-\n-\t/* initially assigning remaining rings count to VSIs num queue value */\n-\ttx_rings_rem = vsi->num_txq;\n-\trx_rings_rem = vsi->num_rxq;\n-\n-\tfor (v_id = 0; v_id < q_vectors; v_id++) {\n-\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[v_id];\n-\t\tint tx_rings_per_v, rx_rings_per_v, q_id, q_base;\n-\n-\t\t/* Tx rings mapping to vector */\n-\t\ttx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);\n-\t\tq_vector->num_ring_tx = tx_rings_per_v;\n-\t\tq_vector->tx.ring = NULL;\n-\t\tq_vector->tx.itr_idx = ICE_TX_ITR;\n-\t\tq_base = vsi->num_txq - tx_rings_rem;\n-\n-\t\tfor (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {\n-\t\t\tstruct ice_ring *tx_ring = vsi->tx_rings[q_id];\n-\n-\t\t\ttx_ring->q_vector = q_vector;\n-\t\t\ttx_ring->next = q_vector->tx.ring;\n-\t\t\tq_vector->tx.ring = tx_ring;\n-\t\t}\n-\t\ttx_rings_rem -= tx_rings_per_v;\n-\n-\t\t/* Rx rings mapping to vector */\n-\t\trx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);\n-\t\tq_vector->num_ring_rx = rx_rings_per_v;\n-\t\tq_vector->rx.ring = NULL;\n-\t\tq_vector->rx.itr_idx = ICE_RX_ITR;\n-\t\tq_base = vsi->num_rxq - rx_rings_rem;\n-\n-\t\tfor (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {\n-\t\t\tstruct ice_ring *rx_ring = vsi->rx_rings[q_id];\n-\n-\t\t\trx_ring->q_vector = q_vector;\n-\t\t\trx_ring->next = q_vector->rx.ring;\n-\t\t\tq_vector->rx.ring = rx_ring;\n-\t\t}\n-\t\trx_rings_rem -= rx_rings_per_v;\n-\t}\n-}\n-\n /**\n * ice_vsi_manage_rss_lut - disable/enable RSS\n * @vsi: the VSI being changed\n@@ -1746,62 +1256,6 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)\n \treturn 0;\n }\n \n-/**\n- * ice_vsi_cfg_txq - Configure single Tx queue\n- * @vsi: the VSI that queue belongs to\n- * @ring: Tx ring to be configured\n- * @tc_q_idx: queue index within given TC\n- * @qg_buf: queue group buffer\n- * @tc: TC that Tx ring belongs to\n- */\n-static 
int\n-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,\n-\t\tstruct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)\n-{\n-\tstruct ice_tlan_ctx tlan_ctx = { 0 };\n-\tstruct ice_aqc_add_txqs_perq *txq;\n-\tstruct ice_pf *pf = vsi->back;\n-\tu8 buf_len = sizeof(*qg_buf);\n-\tenum ice_status status;\n-\tu16 pf_q;\n-\n-\tpf_q = ring->reg_idx;\n-\tice_setup_tx_ctx(ring, &tlan_ctx, pf_q);\n-\t/* copy context contents into the qg_buf */\n-\tqg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);\n-\tice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,\n-\t\t ice_tlan_ctx_info);\n-\n-\t/* init queue specific tail reg. It is referred as\n-\t * transmit comm scheduler queue doorbell.\n-\t */\n-\tring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);\n-\n-\t/* Add unique software queue handle of the Tx queue per\n-\t * TC into the VSI Tx ring\n-\t */\n-\tring->q_handle = tc_q_idx;\n-\n-\tstatus = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,\n-\t\t\t\t 1, qg_buf, buf_len, NULL);\n-\tif (status) {\n-\t\tdev_err(&pf->pdev->dev,\n-\t\t\t\"Failed to set LAN Tx queue context, error: %d\\n\",\n-\t\t\tstatus);\n-\t\treturn -ENODEV;\n-\t}\n-\n-\t/* Add Tx Queue TEID into the VSI Tx ring from the\n-\t * response. This will complete configuring and\n-\t * enabling the queue.\n-\t */\n-\ttxq = &qg_buf->txqs[0];\n-\tif (pf_q == le16_to_cpu(txq->txq_id))\n-\t\tring->txq_teid = le32_to_cpu(txq->q_teid);\n-\n-\treturn 0;\n-}\n-\n /**\n * ice_vsi_cfg_txqs - Configure the VSI for Tx\n * @vsi: the VSI being configured\n@@ -1874,141 +1328,6 @@ u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)\n \treturn 0;\n }\n \n-/**\n- * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set\n- * @hw: board specific structure\n- */\n-static void ice_cfg_itr_gran(struct ice_hw *hw)\n-{\n-\tu32 regval = rd32(hw, GLINT_CTL);\n-\n-\t/* no need to update global register if ITR gran is already set */\n-\tif (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&\n-\t (((regval & GLINT_CTL_ITR_GRAN_200_M) >>\n-\t GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&\n-\t (((regval & GLINT_CTL_ITR_GRAN_100_M) >>\n-\t GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&\n-\t (((regval & GLINT_CTL_ITR_GRAN_50_M) >>\n-\t GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&\n-\t (((regval & GLINT_CTL_ITR_GRAN_25_M) >>\n-\t GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))\n-\t\treturn;\n-\n-\tregval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &\n-\t\t GLINT_CTL_ITR_GRAN_200_M) |\n-\t\t ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &\n-\t\t GLINT_CTL_ITR_GRAN_100_M) |\n-\t\t ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &\n-\t\t GLINT_CTL_ITR_GRAN_50_M) |\n-\t\t ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &\n-\t\t GLINT_CTL_ITR_GRAN_25_M);\n-\twr32(hw, GLINT_CTL, regval);\n-}\n-\n-/**\n- * ice_cfg_itr - configure the initial interrupt throttle values\n- * @hw: pointer to the HW structure\n- * @q_vector: interrupt vector that's being configured\n- *\n- * Configure interrupt throttling values for the ring containers that are\n- * associated with the interrupt vector passed in.\n- */\n-static void\n-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)\n-{\n-\tice_cfg_itr_gran(hw);\n-\n-\tif (q_vector->num_ring_rx) {\n-\t\tstruct ice_ring_container *rc = &q_vector->rx;\n-\n-\t\t/* if this value is set then don't overwrite with default */\n-\t\tif (!rc->itr_setting)\n-\t\t\trc->itr_setting = ICE_DFLT_RX_ITR;\n-\n-\t\trc->target_itr = ITR_TO_REG(rc->itr_setting);\n-\t\trc->next_update = jiffies + 1;\n-\t\trc->current_itr = 
rc->target_itr;\n-\t\twr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),\n-\t\t ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);\n-\t}\n-\n-\tif (q_vector->num_ring_tx) {\n-\t\tstruct ice_ring_container *rc = &q_vector->tx;\n-\n-\t\t/* if this value is set then don't overwrite with default */\n-\t\tif (!rc->itr_setting)\n-\t\t\trc->itr_setting = ICE_DFLT_TX_ITR;\n-\n-\t\trc->target_itr = ITR_TO_REG(rc->itr_setting);\n-\t\trc->next_update = jiffies + 1;\n-\t\trc->current_itr = rc->target_itr;\n-\t\twr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),\n-\t\t ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);\n-\t}\n-}\n-\n-/**\n- * ice_cfg_txq_interrupt - configure interrupt on Tx queue\n- * @vsi: the VSI being configured\n- * @txq: Tx queue being mapped to MSI-X vector\n- * @msix_idx: MSI-X vector index within the function\n- * @itr_idx: ITR index of the interrupt cause\n- *\n- * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector\n- * within the function space.\n- */\n-#ifdef CONFIG_PCI_IOV\n-void\n-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)\n-#else\n-static void\n-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)\n-#endif /* CONFIG_PCI_IOV */\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tu32 val;\n-\n-\titr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;\n-\n-\tval = QINT_TQCTL_CAUSE_ENA_M | itr_idx |\n-\t ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);\n-\n-\twr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);\n-}\n-\n-/**\n- * ice_cfg_rxq_interrupt - configure interrupt on Rx queue\n- * @vsi: the VSI being configured\n- * @rxq: Rx queue being mapped to MSI-X vector\n- * @msix_idx: MSI-X vector index within the function\n- * @itr_idx: ITR index of the interrupt cause\n- *\n- * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector\n- * within the function space.\n- */\n-#ifdef CONFIG_PCI_IOV\n-void\n-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)\n-#else\n-static void\n-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)\n-#endif /* CONFIG_PCI_IOV */\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tu32 val;\n-\n-\titr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;\n-\n-\tval = QINT_RQCTL_CAUSE_ENA_M | itr_idx |\n-\t ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);\n-\n-\twr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);\n-\n-\tice_flush(hw);\n-}\n-\n /**\n * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW\n * @vsi: the VSI being configured\n@@ -2168,109 +1487,6 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)\n \treturn ice_vsi_ctrl_rx_rings(vsi, false);\n }\n \n-/**\n- * ice_trigger_sw_intr - trigger a software interrupt\n- * @hw: pointer to the HW structure\n- * @q_vector: interrupt vector to trigger the software interrupt for\n- */\n-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)\n-{\n-\twr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),\n-\t (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |\n-\t GLINT_DYN_CTL_SWINT_TRIG_M |\n-\t GLINT_DYN_CTL_INTENA_M);\n-}\n-\n-/**\n- * ice_vsi_stop_tx_ring - Disable single Tx ring\n- * @vsi: the VSI being configured\n- * @rst_src: reset source\n- * @rel_vmvf_num: Relative ID of VF/VM\n- * @ring: Tx ring to be stopped\n- * @txq_meta: Meta data of Tx ring to be stopped\n- */\n-#ifndef CONFIG_PCI_IOV\n-static\n-#endif /* !CONFIG_PCI_IOV 
*/\n-int\n-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,\n-\t\t u16 rel_vmvf_num, struct ice_ring *ring,\n-\t\t struct ice_txq_meta *txq_meta)\n-{\n-\tstruct ice_pf *pf = vsi->back;\n-\tstruct ice_q_vector *q_vector;\n-\tstruct ice_hw *hw = &pf->hw;\n-\tenum ice_status status;\n-\tu32 val;\n-\n-\t/* clear cause_ena bit for disabled queues */\n-\tval = rd32(hw, QINT_TQCTL(ring->reg_idx));\n-\tval &= ~QINT_TQCTL_CAUSE_ENA_M;\n-\twr32(hw, QINT_TQCTL(ring->reg_idx), val);\n-\n-\t/* software is expected to wait for 100 ns */\n-\tndelay(100);\n-\n-\t/* trigger a software interrupt for the vector\n-\t * associated to the queue to schedule NAPI handler\n-\t */\n-\tq_vector = ring->q_vector;\n-\tif (q_vector)\n-\t\tice_trigger_sw_intr(hw, q_vector);\n-\n-\tstatus = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,\n-\t\t\t\t txq_meta->tc, 1, &txq_meta->q_handle,\n-\t\t\t\t &txq_meta->q_id, &txq_meta->q_teid, rst_src,\n-\t\t\t\t rel_vmvf_num, NULL);\n-\n-\t/* if the disable queue command was exercised during an\n-\t * active reset flow, ICE_ERR_RESET_ONGOING is returned.\n-\t * This is not an error as the reset operation disables\n-\t * queues at the hardware level anyway.\n-\t */\n-\tif (status == ICE_ERR_RESET_ONGOING) {\n-\t\tdev_dbg(&vsi->back->pdev->dev,\n-\t\t\t\"Reset in progress. LAN Tx queues already disabled\\n\");\n-\t} else if (status == ICE_ERR_DOES_NOT_EXIST) {\n-\t\tdev_dbg(&vsi->back->pdev->dev,\n-\t\t\t\"LAN Tx queues do not exist, nothing to disable\\n\");\n-\t} else if (status) {\n-\t\tdev_err(&vsi->back->pdev->dev,\n-\t\t\t\"Failed to disable LAN Tx queues, error: %d\\n\", status);\n-\t\treturn -ENODEV;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * ice_fill_txq_meta - Prepare the Tx queue's meta data\n- * @vsi: VSI that ring belongs to\n- * @ring: ring that txq_meta will be based on\n- * @txq_meta: a helper struct that wraps Tx queue's information\n- *\n- * Set up a helper struct that will contain all the necessary fields that\n- * are needed for stopping Tx queue\n- */\n-#ifndef CONFIG_PCI_IOV\n-static\n-#endif /* !CONFIG_PCI_IOV */\n-void\n-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,\n-\t\t struct ice_txq_meta *txq_meta)\n-{\n-\tu8 tc = 0;\n-\n-#ifdef CONFIG_DCB\n-\ttc = ring->dcb_tc;\n-#endif /* CONFIG_DCB */\n-\ttxq_meta->q_id = ring->reg_idx;\n-\ttxq_meta->q_teid = ring->txq_teid;\n-\ttxq_meta->q_handle = ring->q_handle;\n-\ttxq_meta->vsi_idx = vsi->idx;\n-\ttxq_meta->tc = tc;\n-}\n-\n /**\n * ice_vsi_stop_tx_rings - Disable Tx rings\n * @vsi: the VSI being configured\n@@ -3128,7 +2344,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)\n \tif (ret < 0)\n \t\tgoto err_vsi;\n \n-\n \tswitch (vsi->type) {\n \tcase ICE_VSI_PF:\n \t\tret = ice_vsi_alloc_q_vectors(vsi);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h\nindex e4ddd315b1b2..9919d3f810e3 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.h\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.h\n@@ -6,19 +6,6 @@\n \n #include \"ice.h\"\n \n-struct ice_txq_meta {\n-\t/* Tx-scheduler element identifier */\n-\tu32 q_teid;\n-\t/* Entry in VSI's txq_map bitmap */\n-\tu16 q_id;\n-\t/* Relative index of Tx queue within TC */\n-\tu16 q_handle;\n-\t/* VSI index that Tx queue belongs to */\n-\tu16 vsi_idx;\n-\t/* TC number that Tx queue belongs to */\n-\tu8 tc;\n-};\n-\n int\n ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,\n \t\t const u8 *macaddr);\n@@ -35,24 +22,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);\n \n void 
ice_vsi_cfg_msix(struct ice_vsi *vsi);\n \n-#ifdef CONFIG_PCI_IOV\n-void\n-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);\n-\n-void\n-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);\n-\n-int\n-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,\n-\t\t u16 rel_vmvf_num, struct ice_ring *ring,\n-\t\t struct ice_txq_meta *txq_meta);\n-\n-void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,\n-\t\t struct ice_txq_meta *txq_meta);\n-\n-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);\n-#endif /* CONFIG_PCI_IOV */\n-\n int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);\n \n int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);\n@@ -100,16 +69,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi);\n \n bool ice_is_reset_in_progress(unsigned long *state);\n \n-void ice_vsi_free_q_vectors(struct ice_vsi *vsi);\n-\n-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);\n-\n void ice_vsi_put_qs(struct ice_vsi *vsi);\n \n-#ifdef CONFIG_DCB\n-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);\n-#endif /* CONFIG_DCB */\n-\n void ice_vsi_dis_irq(struct ice_vsi *vsi);\n \n void ice_vsi_free_irq(struct ice_vsi *vsi);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex 7d8b15fbd874..d2abffe827ef 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -6,6 +6,7 @@\n #define pr_fmt(fmt) KBUILD_MODNAME \": \" fmt\n \n #include \"ice.h\"\n+#include \"ice_base.h\"\n #include \"ice_lib.h\"\n #include \"ice_dcb_lib.h\"\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex 94a9280193e2..a914e603b2ed 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -205,9 +205,7 @@ struct ice_ring {\n \tunsigned int size;\t\t/* length of descriptor ring in bytes */\n \tu32 txq_teid;\t\t\t/* Added Tx queue TEID */\n \tu16 rx_buf_len;\n-#ifdef CONFIG_DCB\n \tu8 dcb_tc;\t\t\t/* Traffic class of ring */\n-#endif /* CONFIG_DCB */\n } ____cacheline_internodealigned_in_smp;\n \n struct ice_ring_container {\ndiff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c\nindex 284b24a51a76..5cb809c58609 100644\n--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c\n+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c\n@@ -2,6 +2,7 @@\n /* Copyright (c) 2018, Intel Corporation. */\n \n #include \"ice.h\"\n+#include \"ice_base.h\"\n #include \"ice_lib.h\"\n \n /**\n", "prefixes": [ "S30", "v2", "1/9" ] }
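
Updating a patch goes through the patch and put methods documented above. A minimal sketch of changing the patch state with an authenticated PATCH request follows; the token placeholder is hypothetical, and the assumption is that writes require a Patchwork account with maintainer rights on the project:

import requests

# Hypothetical API token; replace with a real one from your
# Patchwork profile (assumed to be required for write access).
headers = {"Authorization": "Token <your-token>"}

# PATCH changes only the supplied fields; PUT expects the full
# representation. "state" takes the slug values seen in the
# response above (e.g. "superseded", "accepted").
resp = requests.patch(
    "http://patchwork.ozlabs.org/api/patches/1178243/",
    headers=headers,
    json={"state": "accepted"},
)
resp.raise_for_status()
print(resp.json()["state"])

For scripted state changes, PATCH is usually the better choice of the two update methods, since it leaves all fields you did not supply untouched.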