get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).
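
The three verbs above map onto ordinary HTTP requests against this endpoint. Below is a minimal sketch using Python's requests library; the token value and the assumption that the "state" field is writable for your account are illustrative only, while the base URL, patch ID, and field names come from the response that follows.

#!/usr/bin/env python3
"""Minimal sketch of calling the patch endpoint shown below."""
import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 886558                      # the patch shown in the response below
TOKEN = "REPLACE_WITH_YOUR_API_TOKEN"  # hypothetical credential, assumes maintainer rights

# GET: show a patch (read access needs no authentication)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "-", patch["state"])

# PATCH: partial update, e.g. setting the state (requires an API token)
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded"},
)
resp.raise_for_status()
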

GET /api/patches/886558/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 886558,
    "url": "http://patchwork.ozlabs.org/api/patches/886558/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180315234802.31336-10-anirudh.venkataramanan@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180315234802.31336-10-anirudh.venkataramanan@intel.com>",
    "list_archive_url": null,
    "date": "2018-03-15T23:47:56",
    "name": "[v2,09/15] ice: Configure VSIs for Tx/Rx",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "94457f5ba282a9eac3d3cd207c978b6b65bda701",
    "submitter": {
        "id": 73601,
        "url": "http://patchwork.ozlabs.org/api/people/73601/?format=api",
        "name": "Anirudh Venkataramanan",
        "email": "anirudh.venkataramanan@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180315234802.31336-10-anirudh.venkataramanan@intel.com/mbox/",
    "series": [
        {
            "id": 34096,
            "url": "http://patchwork.ozlabs.org/api/series/34096/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=34096",
            "date": "2018-03-15T23:47:47",
            "name": "Add ice driver",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/34096/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/886558/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/886558/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=none (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 402Qjt0MD9z9sVP\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 16 Mar 2018 11:06:41 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id CEF9B884D0;\n\tFri, 16 Mar 2018 00:06:39 +0000 (UTC)",
            "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id K4_48yndVKOh; Fri, 16 Mar 2018 00:06:30 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id B80BE884B6;\n\tFri, 16 Mar 2018 00:06:30 +0000 (UTC)",
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 31B251C0359\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:17 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 23FF48A2C4\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:17 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id bk0YNhttlXjs for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:06 +0000 (UTC)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id 9F4488A258\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 15 Mar 2018 23:48:06 +0000 (UTC)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga105.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t15 Mar 2018 16:48:05 -0700",
            "from shasta.jf.intel.com ([10.166.241.32])\n\tby fmsmga004.fm.intel.com with ESMTP; 15 Mar 2018 16:48:04 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.48,313,1517904000\"; d=\"scan'208\";a=\"37836792\"",
        "From": "Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Thu, 15 Mar 2018 16:47:56 -0700",
        "Message-Id": "<20180315234802.31336-10-anirudh.venkataramanan@intel.com>",
        "X-Mailer": "git-send-email 2.14.3",
        "In-Reply-To": "<20180315234802.31336-1-anirudh.venkataramanan@intel.com>",
        "References": "<20180315234802.31336-1-anirudh.venkataramanan@intel.com>",
        "X-Mailman-Approved-At": "Fri, 16 Mar 2018 00:06:28 +0000",
        "Subject": "[Intel-wired-lan] [PATCH v2 09/15] ice: Configure VSIs for Tx/Rx",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Cc": "netdev@vger.kernel.org",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "This patch configures the VSIs to be able to send and receive\npackets by doing the following:\n\n1) Initialize flexible parser to extract and include certain\n   fields in the Rx descriptor.\n\n2) Add Tx queues by programming the Tx queue context (implemented in\n   ice_vsi_cfg_txqs). Note that adding the queues also enables (starts)\n   the queues.\n\n3) Add Rx queues by programming Rx queue context (implemented in\n   ice_vsi_cfg_rxqs). Note that this only adds queues but doesn't start\n   them. The rings will be started by calling ice_vsi_start_rx_rings on\n   interface up.\n\n4) Configure interrupts for VSI queues.\n\n5) Implement ice_open and ice_stop.\n\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n---\n drivers/net/ethernet/intel/ice/Makefile         |    3 +-\n drivers/net/ethernet/intel/ice/ice.h            |   36 +-\n drivers/net/ethernet/intel/ice/ice_adminq_cmd.h |   86 ++\n drivers/net/ethernet/intel/ice/ice_common.c     |  602 ++++++++++++\n drivers/net/ethernet/intel/ice/ice_common.h     |   13 +\n drivers/net/ethernet/intel/ice/ice_hw_autogen.h |   59 ++\n drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h  |  260 ++++++\n drivers/net/ethernet/intel/ice/ice_main.c       | 1140 ++++++++++++++++++++++-\n drivers/net/ethernet/intel/ice/ice_sched.c      |  105 +++\n drivers/net/ethernet/intel/ice/ice_sched.h      |    5 +\n drivers/net/ethernet/intel/ice/ice_status.h     |    2 +\n drivers/net/ethernet/intel/ice/ice_txrx.c       |  375 ++++++++\n drivers/net/ethernet/intel/ice/ice_txrx.h       |   75 ++\n drivers/net/ethernet/intel/ice/ice_type.h       |    2 +\n 14 files changed, 2757 insertions(+), 6 deletions(-)\n create mode 100644 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\n create mode 100644 drivers/net/ethernet/intel/ice/ice_txrx.c",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile\nindex 809d85c04398..0abeb20c006d 100644\n--- a/drivers/net/ethernet/intel/ice/Makefile\n+++ b/drivers/net/ethernet/intel/ice/Makefile\n@@ -29,4 +29,5 @@ ice-y := ice_main.o\t\\\n \t ice_common.o\t\\\n \t ice_nvm.o\t\\\n \t ice_switch.o\t\\\n-\t ice_sched.o\n+\t ice_sched.o\t\\\n+\t ice_txrx.o\ndiff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex c9f59374daad..e3ec19099e37 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -25,8 +25,10 @@\n #include <linux/netdevice.h>\n #include <linux/compiler.h>\n #include <linux/etherdevice.h>\n+#include <linux/skbuff.h>\n #include <linux/cpumask.h>\n #include <linux/if_vlan.h>\n+#include <linux/dma-mapping.h>\n #include <linux/pci.h>\n #include <linux/workqueue.h>\n #include <linux/aer.h>\n@@ -57,6 +59,8 @@\n #define ICE_VSI_MAP_SCATTER\t1\n #define ICE_MAX_SCATTER_TXQS\t16\n #define ICE_MAX_SCATTER_RXQS\t16\n+#define ICE_Q_WAIT_RETRY_LIMIT\t10\n+#define ICE_Q_WAIT_MAX_RETRY\t(5 * ICE_Q_WAIT_RETRY_LIMIT)\n #define ICE_RES_VALID_BIT\t0x8000\n #define ICE_RES_MISC_VEC_ID\t(ICE_RES_VALID_BIT - 1)\n #define ICE_INVAL_Q_INDEX\t0xffff\n@@ -70,6 +74,14 @@\n \t\t(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \\\n \t\t  ICE_AQ_VSI_UP_TABLE_UP##i##_M)\n \n+#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))\n+\n+#define ice_for_each_txq(vsi, i) \\\n+\tfor ((i) = 0; (i) < (vsi)->num_txq; (i)++)\n+\n+#define ice_for_each_rxq(vsi, i) \\\n+\tfor ((i) = 0; (i) < (vsi)->num_rxq; (i)++)\n+\n struct ice_tc_info {\n \tu16 qoffset;\n \tu16 qcount;\n@@ -110,6 +122,9 @@ struct ice_vsi {\n \tstruct ice_ring **rx_rings;\t /* rx ring array */\n \tstruct ice_ring **tx_rings;\t /* tx ring array */\n \tstruct ice_q_vector **q_vectors; /* q_vector array */\n+\n+\tirqreturn_t (*irq_handler)(int irq, void *data);\n+\n \tDECLARE_BITMAP(state, __ICE_STATE_NBITS);\n \tint num_q_vectors;\n \tint base_vector;\n@@ -120,8 +135,14 @@ struct ice_vsi {\n \t/* Interrupt thresholds */\n \tu16 work_lmt;\n \n+\tu16 max_frame;\n+\tu16 rx_buf_len;\n+\n \tstruct ice_aqc_vsi_props info;\t /* VSI properties */\n \n+\tbool irqs_ready;\n+\tbool current_isup;\t\t /* Sync 'link up' logging */\n+\n \t/* queue information */\n \tu8 tx_mapping_mode;\t\t /* ICE_MAP_MODE_[CONTIG|SCATTER] */\n \tu8 rx_mapping_mode;\t\t /* ICE_MAP_MODE_[CONTIG|SCATTER] */\n@@ -142,9 +163,11 @@ struct ice_q_vector {\n \tstruct napi_struct napi;\n \tstruct ice_ring_container rx;\n \tstruct ice_ring_container tx;\n+\tstruct irq_affinity_notify affinity_notify;\n \tu16 v_idx;\t\t\t/* index in the vsi->q_vector array. */\n \tu8 num_ring_tx;\t\t\t/* total number of tx rings in vector */\n \tu8 num_ring_rx;\t\t\t/* total number of rx rings in vector */\n+\tchar name[ICE_INT_NAME_STR_LEN];\n } ____cacheline_internodealigned_in_smp;\n \n enum ice_pf_flags {\n@@ -192,10 +215,14 @@ struct ice_netdev_priv {\n /**\n  * ice_irq_dynamic_ena - Enable default interrupt generation settings\n  * @hw: pointer to hw struct\n+ * @vsi: pointer to vsi struct, can be NULL\n+ * @q_vector: pointer to q_vector, can be NULL\n  */\n-static inline void ice_irq_dynamic_ena(struct ice_hw *hw)\n+static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,\n+\t\t\t\t       struct ice_q_vector *q_vector)\n {\n-\tu32 vector = ((struct ice_pf *)hw->back)->oicr_idx;\n+\tu32 vector = (vsi && q_vector) ? 
vsi->base_vector + q_vector->v_idx :\n+\t\t\t\t\t((struct ice_pf *)hw->back)->oicr_idx;\n \tint itr = ICE_ITR_NONE;\n \tu32 val;\n \n@@ -204,7 +231,10 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw)\n \t */\n \tval = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |\n \t      (itr << GLINT_DYN_CTL_ITR_INDX_S);\n-\n+\tif (vsi)\n+\t\tif (test_bit(__ICE_DOWN, vsi->state))\n+\t\t\treturn;\n \twr32(hw, GLINT_DYN_CTL(vector), val);\n }\n+\n #endif /* _ICE_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex c834ed38602b..358a482630db 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -982,6 +982,87 @@ struct ice_aqc_nvm {\n \t__le32\taddr_low;\n };\n \n+/* Add TX LAN Queues (indirect 0x0C30) */\n+struct ice_aqc_add_txqs {\n+\tu8 num_qgrps;\n+\tu8 reserved[3];\n+\t__le32 reserved1;\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* This is the descriptor of each queue entry for the Add TX LAN Queues\n+ * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.\n+ */\n+struct ice_aqc_add_txqs_perq {\n+\t__le16 txq_id;\n+\tu8 rsvd[2];\n+\t__le32 q_teid;\n+\tu8 txq_ctx[22];\n+\tu8 rsvd2[2];\n+\tstruct ice_aqc_txsched_elem info;\n+};\n+\n+/* The format of the command buffer for Add TX LAN Queues (0x0C30)\n+ * is an array of the following structs. Please note that the length of\n+ * each struct ice_aqc_add_tx_qgrp is variable due\n+ * to the variable number of queues in each group!\n+ */\n+struct ice_aqc_add_tx_qgrp {\n+\t__le32 parent_teid;\n+\tu8 num_txqs;\n+\tu8 rsvd[3];\n+\tstruct ice_aqc_add_txqs_perq txqs[1];\n+};\n+\n+/* Disable TX LAN Queues (indirect 0x0C31) */\n+struct ice_aqc_dis_txqs {\n+\tu8 cmd_type;\n+#define ICE_AQC_Q_DIS_CMD_S\t\t0\n+#define ICE_AQC_Q_DIS_CMD_M\t\t(0x3 << ICE_AQC_Q_DIS_CMD_S)\n+#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET\t(0 << ICE_AQC_Q_DIS_CMD_S)\n+#define ICE_AQC_Q_DIS_CMD_VM_RESET\tBIT(ICE_AQC_Q_DIS_CMD_S)\n+#define ICE_AQC_Q_DIS_CMD_VF_RESET\t(2 << ICE_AQC_Q_DIS_CMD_S)\n+#define ICE_AQC_Q_DIS_CMD_PF_RESET\t(3 << ICE_AQC_Q_DIS_CMD_S)\n+#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL\tBIT(2)\n+#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE\tBIT(3)\n+\tu8 num_entries;\n+\t__le16 vmvf_and_timeout;\n+#define ICE_AQC_Q_DIS_VMVF_NUM_S\t0\n+#define ICE_AQC_Q_DIS_VMVF_NUM_M\t(0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S)\n+#define ICE_AQC_Q_DIS_TIMEOUT_S\t\t10\n+#define ICE_AQC_Q_DIS_TIMEOUT_M\t\t(0x3F << ICE_AQC_Q_DIS_TIMEOUT_S)\n+\t__le32 blocked_cgds;\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* The buffer for Disable TX LAN Queues (indirect 0x0C31)\n+ * contains the following structures, arrayed one after the\n+ * other.\n+ * Note: Since the q_id is 16 bits wide, if the\n+ * number of queues is even, then 2 bytes of alignment MUST be\n+ * added before the start of the next group, to allow correct\n+ * alignment of the parent_teid field.\n+ */\n+struct ice_aqc_dis_txq_item {\n+\t__le32 parent_teid;\n+\tu8 num_qs;\n+\tu8 rsvd;\n+\t/* The length of the q_id array varies according to num_qs */\n+\t__le16 q_id[1];\n+\t/* This only applies from F8 onward */\n+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S\t\t15\n+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q\t\\\n+\t\t\t(0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)\n+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET\t\\\n+\t\t\t(1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)\n+};\n+\n+struct ice_aqc_dis_txq {\n+\tstruct ice_aqc_dis_txq_item qgrps[1];\n+};\n+\n /**\n  * struct ice_aq_desc - Admin Queue (AQ) descriptor\n 
 * @flags: ICE_AQ_FLAG_* flags\n@@ -1022,6 +1103,8 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_query_txsched_res query_sched_res;\n \t\tstruct ice_aqc_add_move_delete_elem add_move_delete_elem;\n \t\tstruct ice_aqc_nvm nvm;\n+\t\tstruct ice_aqc_add_txqs add_txqs;\n+\t\tstruct ice_aqc_dis_txqs dis_txqs;\n \t\tstruct ice_aqc_add_get_update_free_vsi vsi_cmd;\n \t\tstruct ice_aqc_alloc_free_res_cmd sw_res_ctrl;\n \t\tstruct ice_aqc_get_link_status get_link_status;\n@@ -1102,6 +1185,9 @@ enum ice_adminq_opc {\n \t/* NVM commands */\n \tice_aqc_opc_nvm_read\t\t\t\t= 0x0701,\n \n+\t/* TX queue handling commands/events */\n+\tice_aqc_opc_add_txqs\t\t\t\t= 0x0C30,\n+\tice_aqc_opc_dis_txqs\t\t\t\t= 0x0C31,\n };\n \n #endif /* _ICE_ADMINQ_CMD_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex cb48146b46f3..640edf485d50 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -21,6 +21,25 @@\n \n #define ICE_PF_RESET_WAIT_COUNT\t200\n \n+#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \\\n+\twr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \\\n+\t     ((ICE_RX_OPC_MDID << \\\n+\t       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \\\n+\t      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \\\n+\t     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \\\n+\t      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))\n+\n+#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \\\n+\twr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \\\n+\t     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \\\n+\t      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \\\n+\t     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \\\n+\t      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \\\n+\t     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \\\n+\t      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \\\n+\t     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \\\n+\t      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))\n+\n /**\n  * ice_set_mac_type - Sets MAC type\n  * @hw: pointer to the HW structure\n@@ -272,6 +291,33 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,\n \treturn status;\n }\n \n+/**\n+ * ice_init_flex_parser - initialize rx flex parser\n+ * @hw: pointer to the hardware structure\n+ *\n+ * Function to initialize flex descriptors\n+ */\n+static void ice_init_flex_parser(struct ice_hw *hw)\n+{\n+\tu8 idx = 0;\n+\n+\tICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);\n+\tICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);\n+\tICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);\n+\tICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);\n+\tICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,\n+\t\t\t      ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);\n+\tICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,\n+\t\t\t      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);\n+\tICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,\n+\t\t\t      ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,\n+\t\t\t      idx++);\n+\tICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,\n+\t\t\t      ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);\n+\tICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,\n+\t\t\t      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);\n+}\n+\n /**\n  * ice_init_fltr_mgmt_struct - initializes filter management list and locks\n  * @hw: pointer to the hw struct\n@@ -445,6 +491,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n \tif (status)\n \t\tgoto 
err_unroll_fltr_mgmt_struct;\n \n+\tice_init_flex_parser(hw);\n+\n \treturn 0;\n \n err_unroll_fltr_mgmt_struct:\n@@ -611,6 +659,114 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)\n \treturn ice_check_reset(hw);\n }\n \n+/**\n+ * ice_copy_rxq_ctx_to_hw\n+ * @hw: pointer to the hardware structure\n+ * @ice_rxq_ctx: pointer to the rxq context\n+ * @rxq_index: the index of the rx queue\n+ *\n+ * Copies rxq context from dense structure to hw register space\n+ */\n+static enum ice_status\n+ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)\n+{\n+\tu8 i;\n+\n+\tif (!ice_rxq_ctx)\n+\t\treturn ICE_ERR_BAD_PTR;\n+\n+\tif (rxq_index > QRX_CTRL_MAX_INDEX)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\t/* Copy each dword separately to hw */\n+\tfor (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {\n+\t\twr32(hw, QRX_CONTEXT(i, rxq_index),\n+\t\t     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));\n+\n+\t\tice_debug(hw, ICE_DBG_QCTX, \"qrxdata[%d]: %08X\\n\", i,\n+\t\t\t  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* LAN Rx Queue Context */\n+static const struct ice_ctx_ele ice_rlan_ctx_info[] = {\n+\t/* Field\t\tWidth\tLSB */\n+\tICE_CTX_STORE(ice_rlan_ctx, head,\t\t13,\t0),\n+\tICE_CTX_STORE(ice_rlan_ctx, cpuid,\t\t8,\t13),\n+\tICE_CTX_STORE(ice_rlan_ctx, base,\t\t57,\t32),\n+\tICE_CTX_STORE(ice_rlan_ctx, qlen,\t\t13,\t89),\n+\tICE_CTX_STORE(ice_rlan_ctx, dbuf,\t\t7,\t102),\n+\tICE_CTX_STORE(ice_rlan_ctx, hbuf,\t\t5,\t109),\n+\tICE_CTX_STORE(ice_rlan_ctx, dtype,\t\t2,\t114),\n+\tICE_CTX_STORE(ice_rlan_ctx, dsize,\t\t1,\t116),\n+\tICE_CTX_STORE(ice_rlan_ctx, crcstrip,\t\t1,\t117),\n+\tICE_CTX_STORE(ice_rlan_ctx, l2tsel,\t\t1,\t119),\n+\tICE_CTX_STORE(ice_rlan_ctx, hsplit_0,\t\t4,\t120),\n+\tICE_CTX_STORE(ice_rlan_ctx, hsplit_1,\t\t2,\t124),\n+\tICE_CTX_STORE(ice_rlan_ctx, showiv,\t\t1,\t127),\n+\tICE_CTX_STORE(ice_rlan_ctx, rxmax,\t\t14,\t174),\n+\tICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,\t1,\t193),\n+\tICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,\t1,\t194),\n+\tICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,\t1,\t195),\n+\tICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,\t1,\t196),\n+\tICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,\t\t3,\t198),\n+\t{ 0 }\n+};\n+\n+/**\n+ * ice_write_rxq_ctx\n+ * @hw: pointer to the hardware structure\n+ * @rlan_ctx: pointer to the rxq context\n+ * @rxq_index: the index of the rx queue\n+ *\n+ * Converts rxq context from sparse to dense structure and then writes\n+ * it to hw register space\n+ */\n+enum ice_status\n+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,\n+\t\t  u32 rxq_index)\n+{\n+\tu8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };\n+\n+\tice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);\n+\treturn ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);\n+}\n+\n+/* LAN Tx Queue Context */\n+const struct ice_ctx_ele ice_tlan_ctx_info[] = {\n+\t\t\t\t    /* Field\t\t\tWidth\tLSB */\n+\tICE_CTX_STORE(ice_tlan_ctx, base,\t\t\t57,\t0),\n+\tICE_CTX_STORE(ice_tlan_ctx, port_num,\t\t\t3,\t57),\n+\tICE_CTX_STORE(ice_tlan_ctx, cgd_num,\t\t\t5,\t60),\n+\tICE_CTX_STORE(ice_tlan_ctx, pf_num,\t\t\t3,\t65),\n+\tICE_CTX_STORE(ice_tlan_ctx, vmvf_num,\t\t\t10,\t68),\n+\tICE_CTX_STORE(ice_tlan_ctx, vmvf_type,\t\t\t2,\t78),\n+\tICE_CTX_STORE(ice_tlan_ctx, src_vsi,\t\t\t10,\t80),\n+\tICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,\t\t\t1,\t90),\n+\tICE_CTX_STORE(ice_tlan_ctx, alt_vlan,\t\t\t1,\t92),\n+\tICE_CTX_STORE(ice_tlan_ctx, cpuid,\t\t\t8,\t93),\n+\tICE_CTX_STORE(ice_tlan_ctx, wb_mode,\t\t\t1,\t101),\n+\tICE_CTX_STORE(ice_tlan_ctx, 
tphrd_desc,\t\t\t1,\t102),\n+\tICE_CTX_STORE(ice_tlan_ctx, tphrd,\t\t\t1,\t103),\n+\tICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,\t\t\t1,\t104),\n+\tICE_CTX_STORE(ice_tlan_ctx, cmpq_id,\t\t\t9,\t105),\n+\tICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,\t\t14,\t114),\n+\tICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,\t1,\t128),\n+\tICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,\t\t6,\t129),\n+\tICE_CTX_STORE(ice_tlan_ctx, qlen,\t\t\t13,\t135),\n+\tICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,\t\t4,\t148),\n+\tICE_CTX_STORE(ice_tlan_ctx, tso_ena,\t\t\t1,\t152),\n+\tICE_CTX_STORE(ice_tlan_ctx, tso_qnum,\t\t\t11,\t153),\n+\tICE_CTX_STORE(ice_tlan_ctx, legacy_int,\t\t\t1,\t164),\n+\tICE_CTX_STORE(ice_tlan_ctx, drop_ena,\t\t\t1,\t165),\n+\tICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,\t\t2,\t166),\n+\tICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,\t3,\t168),\n+\tICE_CTX_STORE(ice_tlan_ctx, int_q_state,\t\t110,\t171),\n+\t{ 0 }\n+};\n+\n /**\n  * ice_debug_cq\n  * @hw: pointer to the hardware structure\n@@ -1118,3 +1274,449 @@ void ice_clear_pxe_mode(struct ice_hw *hw)\n \tif (ice_check_sq_alive(hw, &hw->adminq))\n \t\tice_aq_clear_pxe_mode(hw);\n }\n+\n+/**\n+ * ice_aq_add_lan_txq\n+ * @hw: pointer to the hardware structure\n+ * @num_qgrps: Number of added queue groups\n+ * @qg_list: list of queue groups to be added\n+ * @buf_size: size of buffer for indirect command\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Add Tx LAN queue (0x0C30)\n+ *\n+ * NOTE:\n+ * Prior to calling add Tx LAN queue:\n+ * Initialize the following as part of the Tx queue context:\n+ * Completion queue ID if the queue uses Completion queue, Quanta profile,\n+ * Cache profile and Packet shaper profile.\n+ *\n+ * After add Tx LAN queue AQ command is completed:\n+ * Interrupts should be associated with specific queues,\n+ * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue\n+ * flow.\n+ */\n+static enum ice_status\n+ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,\n+\t\t   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,\n+\t\t   struct ice_sq_cd *cd)\n+{\n+\tu16 i, sum_header_size, sum_q_size = 0;\n+\tstruct ice_aqc_add_tx_qgrp *list;\n+\tstruct ice_aqc_add_txqs *cmd;\n+\tstruct ice_aq_desc desc;\n+\n+\tcmd = &desc.params.add_txqs;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);\n+\n+\tif (!qg_list)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tif (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tsum_header_size = num_qgrps *\n+\t\t(sizeof(*qg_list) - sizeof(*qg_list->txqs));\n+\n+\tlist = qg_list;\n+\tfor (i = 0; i < num_qgrps; i++) {\n+\t\tstruct ice_aqc_add_txqs_perq *q = list->txqs;\n+\n+\t\tsum_q_size += list->num_txqs * sizeof(*q);\n+\t\tlist = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);\n+\t}\n+\n+\tif (buf_size != (sum_header_size + sum_q_size))\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\n+\tcmd->num_qgrps = num_qgrps;\n+\n+\treturn ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);\n+}\n+\n+/**\n+ * ice_aq_dis_lan_txq\n+ * @hw: pointer to the hardware structure\n+ * @num_qgrps: number of groups in the list\n+ * @qg_list: the list of groups to disable\n+ * @buf_size: the total size of the qg_list buffer in bytes\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Disable LAN Tx queue (0x0C31)\n+ */\n+static enum ice_status\n+ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,\n+\t\t   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,\n+\t\t   struct ice_sq_cd *cd)\n+{\n+\tstruct 
ice_aqc_dis_txqs *cmd;\n+\tstruct ice_aq_desc desc;\n+\tu16 i, sz = 0;\n+\n+\tcmd = &desc.params.dis_txqs;\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);\n+\n+\tif (!qg_list)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tif (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)\n+\t\treturn ICE_ERR_PARAM;\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\tcmd->num_entries = num_qgrps;\n+\n+\tfor (i = 0; i < num_qgrps; ++i) {\n+\t\t/* Calculate the size taken up by the queue IDs in this group */\n+\t\tsz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);\n+\n+\t\t/* Add the size of the group header */\n+\t\tsz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);\n+\n+\t\t/* If the num of queues is even, add 2 bytes of padding */\n+\t\tif ((qg_list[i].num_qs % 2) == 0)\n+\t\t\tsz += 2;\n+\t}\n+\n+\tif (buf_size != sz)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\treturn ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);\n+}\n+\n+/* End of FW Admin Queue command wrappers */\n+\n+/**\n+ * ice_write_byte - write a byte to a packed context structure\n+ * @src_ctx:  the context structure to read from\n+ * @dest_ctx: the context to be written to\n+ * @ce_info:  a description of the struct to be filled\n+ */\n+static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,\n+\t\t\t   const struct ice_ctx_ele *ce_info)\n+{\n+\tu8 src_byte, dest_byte, mask;\n+\tu8 *from, *dest;\n+\tu16 shift_width;\n+\n+\t/* copy from the next struct field */\n+\tfrom = src_ctx + ce_info->offset;\n+\n+\t/* prepare the bits and mask */\n+\tshift_width = ce_info->lsb % 8;\n+\tmask = (u8)(BIT(ce_info->width) - 1);\n+\n+\tsrc_byte = *from;\n+\tsrc_byte &= mask;\n+\n+\t/* shift to correct alignment */\n+\tmask <<= shift_width;\n+\tsrc_byte <<= shift_width;\n+\n+\t/* get the current bits from the target bit string */\n+\tdest = dest_ctx + (ce_info->lsb / 8);\n+\n+\tmemcpy(&dest_byte, dest, sizeof(dest_byte));\n+\n+\tdest_byte &= ~mask;\t/* get the bits not changing */\n+\tdest_byte |= src_byte;\t/* add in the new bits */\n+\n+\t/* put it all back */\n+\tmemcpy(dest, &dest_byte, sizeof(dest_byte));\n+}\n+\n+/**\n+ * ice_write_word - write a word to a packed context structure\n+ * @src_ctx:  the context structure to read from\n+ * @dest_ctx: the context to be written to\n+ * @ce_info:  a description of the struct to be filled\n+ */\n+static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,\n+\t\t\t   const struct ice_ctx_ele *ce_info)\n+{\n+\tu16 src_word, mask;\n+\t__le16 dest_word;\n+\tu8 *from, *dest;\n+\tu16 shift_width;\n+\n+\t/* copy from the next struct field */\n+\tfrom = src_ctx + ce_info->offset;\n+\n+\t/* prepare the bits and mask */\n+\tshift_width = ce_info->lsb % 8;\n+\tmask = BIT(ce_info->width) - 1;\n+\n+\t/* don't swizzle the bits until after the mask because the mask bits\n+\t * will be in a different bit position on big endian machines\n+\t */\n+\tsrc_word = *(u16 *)from;\n+\tsrc_word &= mask;\n+\n+\t/* shift to correct alignment */\n+\tmask <<= shift_width;\n+\tsrc_word <<= shift_width;\n+\n+\t/* get the current bits from the target bit string */\n+\tdest = dest_ctx + (ce_info->lsb / 8);\n+\n+\tmemcpy(&dest_word, dest, sizeof(dest_word));\n+\n+\tdest_word &= ~(cpu_to_le16(mask));\t/* get the bits not changing */\n+\tdest_word |= cpu_to_le16(src_word);\t/* add in the new bits */\n+\n+\t/* put it all back */\n+\tmemcpy(dest, &dest_word, sizeof(dest_word));\n+}\n+\n+/**\n+ * ice_write_dword - write a dword to a packed context structure\n+ * @src_ctx:  the context structure to read from\n+ * @dest_ctx: the context to be written to\n+ * @ce_info:  a 
description of the struct to be filled\n+ */\n+static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,\n+\t\t\t    const struct ice_ctx_ele *ce_info)\n+{\n+\tu32 src_dword, mask;\n+\t__le32 dest_dword;\n+\tu8 *from, *dest;\n+\tu16 shift_width;\n+\n+\t/* copy from the next struct field */\n+\tfrom = src_ctx + ce_info->offset;\n+\n+\t/* prepare the bits and mask */\n+\tshift_width = ce_info->lsb % 8;\n+\n+\t/* if the field width is exactly 32 on an x86 machine, then the shift\n+\t * operation will not work because the SHL instructions count is masked\n+\t * to 5 bits so the shift will do nothing\n+\t */\n+\tif (ce_info->width < 32)\n+\t\tmask = BIT(ce_info->width) - 1;\n+\telse\n+\t\tmask = (u32)~0;\n+\n+\t/* don't swizzle the bits until after the mask because the mask bits\n+\t * will be in a different bit position on big endian machines\n+\t */\n+\tsrc_dword = *(u32 *)from;\n+\tsrc_dword &= mask;\n+\n+\t/* shift to correct alignment */\n+\tmask <<= shift_width;\n+\tsrc_dword <<= shift_width;\n+\n+\t/* get the current bits from the target bit string */\n+\tdest = dest_ctx + (ce_info->lsb / 8);\n+\n+\tmemcpy(&dest_dword, dest, sizeof(dest_dword));\n+\n+\tdest_dword &= ~(cpu_to_le32(mask));\t/* get the bits not changing */\n+\tdest_dword |= cpu_to_le32(src_dword);\t/* add in the new bits */\n+\n+\t/* put it all back */\n+\tmemcpy(dest, &dest_dword, sizeof(dest_dword));\n+}\n+\n+/**\n+ * ice_write_qword - write a qword to a packed context structure\n+ * @src_ctx:  the context structure to read from\n+ * @dest_ctx: the context to be written to\n+ * @ce_info:  a description of the struct to be filled\n+ */\n+static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,\n+\t\t\t    const struct ice_ctx_ele *ce_info)\n+{\n+\tu64 src_qword, mask;\n+\t__le64 dest_qword;\n+\tu8 *from, *dest;\n+\tu16 shift_width;\n+\n+\t/* copy from the next struct field */\n+\tfrom = src_ctx + ce_info->offset;\n+\n+\t/* prepare the bits and mask */\n+\tshift_width = ce_info->lsb % 8;\n+\n+\t/* if the field width is exactly 64 on an x86 machine, then the shift\n+\t * operation will not work because the SHL instructions count is masked\n+\t * to 6 bits so the shift will do nothing\n+\t */\n+\tif (ce_info->width < 64)\n+\t\tmask = BIT_ULL(ce_info->width) - 1;\n+\telse\n+\t\tmask = (u64)~0;\n+\n+\t/* don't swizzle the bits until after the mask because the mask bits\n+\t * will be in a different bit position on big endian machines\n+\t */\n+\tsrc_qword = *(u64 *)from;\n+\tsrc_qword &= mask;\n+\n+\t/* shift to correct alignment */\n+\tmask <<= shift_width;\n+\tsrc_qword <<= shift_width;\n+\n+\t/* get the current bits from the target bit string */\n+\tdest = dest_ctx + (ce_info->lsb / 8);\n+\n+\tmemcpy(&dest_qword, dest, sizeof(dest_qword));\n+\n+\tdest_qword &= ~(cpu_to_le64(mask));\t/* get the bits not changing */\n+\tdest_qword |= cpu_to_le64(src_qword);\t/* add in the new bits */\n+\n+\t/* put it all back */\n+\tmemcpy(dest, &dest_qword, sizeof(dest_qword));\n+}\n+\n+/**\n+ * ice_set_ctx - set context bits in packed structure\n+ * @src_ctx:  pointer to a generic non-packed context structure\n+ * @dest_ctx: pointer to memory for the packed structure\n+ * @ce_info:  a description of the structure to be transformed\n+ */\n+enum ice_status\n+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)\n+{\n+\tint f;\n+\n+\tfor (f = 0; ce_info[f].width; f++) {\n+\t\t/* We have to deal with each element of the FW response\n+\t\t * using the correct size so that we are correct regardless\n+\t\t * of the endianness of the 
machine.\n+\t\t */\n+\t\tswitch (ce_info[f].size_of) {\n+\t\tcase sizeof(u8):\n+\t\t\tice_write_byte(src_ctx, dest_ctx, &ce_info[f]);\n+\t\t\tbreak;\n+\t\tcase sizeof(u16):\n+\t\t\tice_write_word(src_ctx, dest_ctx, &ce_info[f]);\n+\t\t\tbreak;\n+\t\tcase sizeof(u32):\n+\t\t\tice_write_dword(src_ctx, dest_ctx, &ce_info[f]);\n+\t\t\tbreak;\n+\t\tcase sizeof(u64):\n+\t\t\tice_write_qword(src_ctx, dest_ctx, &ce_info[f]);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn ICE_ERR_INVAL_SIZE;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_ena_vsi_txq\n+ * @pi: port information structure\n+ * @vsi_id: VSI id\n+ * @tc: tc number\n+ * @num_qgrps: Number of added queue groups\n+ * @buf: list of queue groups to be added\n+ * @buf_size: size of buffer for indirect command\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * This function adds one lan q\n+ */\n+enum ice_status\n+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,\n+\t\tstruct ice_aqc_add_tx_qgrp *buf, u16 buf_size,\n+\t\tstruct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_txsched_elem_data node = { 0 };\n+\tstruct ice_sched_node *parent;\n+\tenum ice_status status;\n+\tstruct ice_hw *hw;\n+\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn ICE_ERR_CFG;\n+\n+\tif (num_qgrps > 1 || buf->num_txqs > 1)\n+\t\treturn ICE_ERR_MAX_LIMIT;\n+\n+\thw = pi->hw;\n+\n+\tmutex_lock(&pi->sched_lock);\n+\n+\t/* find a parent node */\n+\tparent = ice_sched_get_free_qparent(pi, vsi_id, tc,\n+\t\t\t\t\t    ICE_SCHED_NODE_OWNER_LAN);\n+\tif (!parent) {\n+\t\tstatus = ICE_ERR_PARAM;\n+\t\tgoto ena_txq_exit;\n+\t}\n+\tbuf->parent_teid = parent->info.node_teid;\n+\tnode.parent_teid = parent->info.node_teid;\n+\t/* Mark that the values in the \"generic\" section as valid. The default\n+\t * value in the \"generic\" section is zero. 
This means that :\n+\t * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.\n+\t * - 0 priority among siblings, indicated by Bit 1-3.\n+\t * - WFQ, indicated by Bit 4.\n+\t * - 0 Adjustment value is used in PSM credit update flow, indicated by\n+\t * Bit 5-6.\n+\t * - Bit 7 is reserved.\n+\t * Without setting the generic section as valid in valid_sections, the\n+\t * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.\n+\t */\n+\tbuf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;\n+\n+\t/* add the lan q */\n+\tstatus = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);\n+\tif (status)\n+\t\tgoto ena_txq_exit;\n+\n+\tnode.node_teid = buf->txqs[0].q_teid;\n+\tnode.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;\n+\n+\t/* add a leaf node into schduler tree q layer */\n+\tstatus = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);\n+\n+ena_txq_exit:\n+\tmutex_unlock(&pi->sched_lock);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_dis_vsi_txq\n+ * @pi: port information structure\n+ * @num_queues: number of queues\n+ * @q_ids: pointer to the q_id array\n+ * @q_teids: pointer to queue node teids\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * This function removes queues and their corresponding nodes in SW DB\n+ */\n+enum ice_status\n+ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,\n+\t\tu32 *q_teids, struct ice_sq_cd *cd)\n+{\n+\tenum ice_status status = ICE_ERR_DOES_NOT_EXIST;\n+\tstruct ice_aqc_dis_txq_item qg_list;\n+\tu16 i;\n+\n+\tif (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)\n+\t\treturn ICE_ERR_CFG;\n+\n+\tmutex_lock(&pi->sched_lock);\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\tstruct ice_sched_node *node;\n+\n+\t\tnode = ice_sched_find_node_by_teid(pi->root, q_teids[i]);\n+\t\tif (!node)\n+\t\t\tcontinue;\n+\t\tqg_list.parent_teid = node->info.parent_teid;\n+\t\tqg_list.num_qs = 1;\n+\t\tqg_list.q_id[0] = cpu_to_le16(q_ids[i]);\n+\t\tstatus = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,\n+\t\t\t\t\t    sizeof(qg_list), cd);\n+\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\tice_free_sched_node(pi, node);\n+\t}\n+\tmutex_unlock(&pi->sched_lock);\n+\treturn status;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex ab47204dfc5a..199199afaa28 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -44,9 +44,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,\n \t\tstruct ice_sq_cd *cd);\n void ice_clear_pxe_mode(struct ice_hw *hw);\n enum ice_status ice_get_caps(struct ice_hw *hw);\n+enum ice_status\n+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,\n+\t\t  u32 rxq_index);\n bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);\n enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);\n void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);\n+extern const struct ice_ctx_ele ice_tlan_ctx_info[];\n+enum ice_status\n+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);\n enum ice_status\n ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,\n \t\tvoid *buf, u16 buf_size, struct ice_sq_cd *cd);\n@@ -55,4 +61,11 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);\n enum ice_status\n ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,\n \t\t     struct ice_link_status *link, struct ice_sq_cd *cd);\n+enum ice_status\n+ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 
*q_ids,\n+\t\tu32 *q_teids, struct ice_sq_cd *cmd_details);\n+enum ice_status\n+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,\n+\t\tstruct ice_aqc_add_tx_qgrp *buf, u16 buf_size,\n+\t\tstruct ice_sq_cd *cd);\n #endif /* _ICE_COMMON_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\nindex 700edc7e7280..6303489866a4 100644\n--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n@@ -20,6 +20,7 @@\n #ifndef _ICE_HW_AUTOGEN_H_\n #define _ICE_HW_AUTOGEN_H_\n \n+#define QTX_COMM_DBELL(_DBQM)\t\t(0x002C0000 + ((_DBQM) * 4))\n #define PF_FW_ARQBAH\t\t\t0x00080180\n #define PF_FW_ARQBAL\t\t\t0x00080080\n #define PF_FW_ARQH\t\t\t0x00080380\n@@ -54,6 +55,44 @@\n #define PF_FW_ATQLEN_ATQENABLE_S\t31\n #define PF_FW_ATQLEN_ATQENABLE_M\tBIT(PF_FW_ATQLEN_ATQENABLE_S)\n #define PF_FW_ATQT\t\t\t0x00080400\n+\n+#define GLFLXP_RXDID_FLAGS(_i, _j)\t\t(0x0045D000 + ((_i) * 4 + (_j) * 256))\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S\t0\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M\tICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S\t8\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M\tICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S\t16\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M\tICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S\t24\n+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M\tICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)\n+#define GLFLXP_RXDID_FLX_WRD_0(_i)\t\t(0x0045c800 + ((_i) * 4))\n+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S\t0\n+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M\tICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)\n+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S\t30\n+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M\tICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)\n+#define GLFLXP_RXDID_FLX_WRD_1(_i)\t\t(0x0045c900 + ((_i) * 4))\n+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S\t0\n+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M\tICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)\n+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S\t30\n+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M\tICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)\n+#define GLFLXP_RXDID_FLX_WRD_2(_i)\t\t(0x0045ca00 + ((_i) * 4))\n+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S\t0\n+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M\tICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)\n+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S\t30\n+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M\tICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)\n+#define GLFLXP_RXDID_FLX_WRD_3(_i)\t\t(0x0045cb00 + ((_i) * 4))\n+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S\t0\n+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M\tICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)\n+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S\t30\n+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M\tICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)\n+\n+#define QRXFLXP_CNTXT(_QRX)\t\t(0x00480000 + ((_QRX) * 4))\n+#define QRXFLXP_CNTXT_RXDID_IDX_S\t0\n+#define QRXFLXP_CNTXT_RXDID_IDX_M\tICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)\n+#define QRXFLXP_CNTXT_RXDID_PRIO_S\t8\n+#define QRXFLXP_CNTXT_RXDID_PRIO_M\tICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)\n+#define QRXFLXP_CNTXT_TS_S\t\t11\n+#define QRXFLXP_CNTXT_TS_M\t\tBIT(QRXFLXP_CNTXT_TS_S)\n #define GLGEN_RSTAT\t\t\t0x000B8188\n #define GLGEN_RSTAT_DEVSTATE_S\t\t0\n #define GLGEN_RSTAT_DEVSTATE_M\t\tICE_M(0x3, 
GLGEN_RSTAT_DEVSTATE_S)\n@@ -76,6 +115,8 @@\n #define GLINT_DYN_CTL_INTENA_M\t\tBIT(GLINT_DYN_CTL_INTENA_S)\n #define GLINT_DYN_CTL_CLEARPBA_S\t1\n #define GLINT_DYN_CTL_CLEARPBA_M\tBIT(GLINT_DYN_CTL_CLEARPBA_S)\n+#define GLINT_DYN_CTL_SWINT_TRIG_S\t2\n+#define GLINT_DYN_CTL_SWINT_TRIG_M\tBIT(GLINT_DYN_CTL_SWINT_TRIG_S)\n #define GLINT_DYN_CTL_ITR_INDX_S\t3\n #define GLINT_DYN_CTL_SW_ITR_INDX_S\t25\n #define GLINT_DYN_CTL_SW_ITR_INDX_M\tICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)\n@@ -120,7 +161,25 @@\n #define PFINT_OICR_CTL_CAUSE_ENA_S\t30\n #define PFINT_OICR_CTL_CAUSE_ENA_M\tBIT(PFINT_OICR_CTL_CAUSE_ENA_S)\n #define PFINT_OICR_ENA\t\t\t0x0016C900\n+#define QINT_RQCTL(_QRX)\t\t(0x00150000 + ((_QRX) * 4))\n+#define QINT_RQCTL_MSIX_INDX_S\t\t0\n+#define QINT_RQCTL_ITR_INDX_S\t\t11\n+#define QINT_RQCTL_CAUSE_ENA_S\t\t30\n+#define QINT_RQCTL_CAUSE_ENA_M\t\tBIT(QINT_RQCTL_CAUSE_ENA_S)\n+#define QINT_TQCTL(_DBQM)\t\t(0x00140000 + ((_DBQM) * 4))\n+#define QINT_TQCTL_MSIX_INDX_S\t\t0\n+#define QINT_TQCTL_ITR_INDX_S\t\t11\n+#define QINT_TQCTL_CAUSE_ENA_S\t\t30\n+#define QINT_TQCTL_CAUSE_ENA_M\t\tBIT(QINT_TQCTL_CAUSE_ENA_S)\n #define GLLAN_RCTL_0\t\t\t0x002941F8\n+#define QRX_CONTEXT(_i, _QRX)\t\t(0x00280000 + ((_i) * 8192 + (_QRX) * 4))\n+#define QRX_CTRL(_QRX)\t\t\t(0x00120000 + ((_QRX) * 4))\n+#define QRX_CTRL_MAX_INDEX\t\t2047\n+#define QRX_CTRL_QENA_REQ_S\t\t0\n+#define QRX_CTRL_QENA_REQ_M\t\tBIT(QRX_CTRL_QENA_REQ_S)\n+#define QRX_CTRL_QENA_STAT_S\t\t2\n+#define QRX_CTRL_QENA_STAT_M\t\tBIT(QRX_CTRL_QENA_STAT_S)\n+#define QRX_TAIL(_QRX)\t\t\t(0x00290000 + ((_QRX) * 4))\n #define GLNVM_FLA\t\t\t0x000B6108\n #define GLNVM_FLA_LOCKED_S\t\t6\n #define GLNVM_FLA_LOCKED_M\t\tBIT(GLNVM_FLA_LOCKED_S)\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\nnew file mode 100644\nindex 000000000000..0cdf1ae480cf\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\n@@ -0,0 +1,260 @@\n+/* SPDX-License-Identifier: GPL-2.0-only */\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+#ifndef _ICE_LAN_TX_RX_H_\n+#define _ICE_LAN_TX_RX_H_\n+\n+union ice_32byte_rx_desc {\n+\tstruct {\n+\t\t__le64  pkt_addr; /* Packet buffer address */\n+\t\t__le64  hdr_addr; /* Header buffer address */\n+\t\t\t/* bit 0 of hdr_addr is DD bit */\n+\t\t__le64  rsvd1;\n+\t\t__le64  rsvd2;\n+\t} read;\n+\tstruct {\n+\t\tstruct {\n+\t\t\tstruct {\n+\t\t\t\t__le16 mirroring_status;\n+\t\t\t\t__le16 l2tag1;\n+\t\t\t} lo_dword;\n+\t\t\tunion {\n+\t\t\t\t__le32 rss; /* RSS Hash */\n+\t\t\t\t__le32 fd_id; /* Flow Director filter id */\n+\t\t\t} hi_dword;\n+\t\t} qword0;\n+\t\tstruct {\n+\t\t\t/* status/error/PTYPE/length */\n+\t\t\t__le64 status_error_len;\n+\t\t} qword1;\n+\t\tstruct {\n+\t\t\t__le16 ext_status; /* extended status */\n+\t\t\t__le16 rsvd;\n+\t\t\t__le16 l2tag2_1;\n+\t\t\t__le16 l2tag2_2;\n+\t\t} qword2;\n+\t\tstruct {\n+\t\t\t__le32 reserved;\n+\t\t\t__le32 fd_id;\n+\t\t} qword3;\n+\t} wb; /* writeback */\n+};\n+\n+/* RX Flex Descriptor\n+ * This descriptor is used instead of the legacy version descriptor when\n+ * ice_rlan_ctx.adv_desc is set\n+ */\n+union ice_32b_rx_flex_desc {\n+\tstruct {\n+\t\t__le64  pkt_addr; /* Packet buffer address */\n+\t\t__le64  hdr_addr; /* Header buffer address */\n+\t\t\t\t  /* bit 0 of hdr_addr is DD bit */\n+\t\t__le64  rsvd1;\n+\t\t__le64  rsvd2;\n+\t} read;\n+\tstruct {\n+\t\t/* Qword 0 */\n+\t\tu8 rxdid; /* descriptor builder profile id */\n+\t\tu8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */\n+\t\t__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */\n+\t\t__le16 pkt_len; /* [15:14] are reserved */\n+\t\t__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */\n+\t\t\t\t\t\t/* sph=[11:11] */\n+\t\t\t\t\t\t/* ff1/ext=[15:12] */\n+\n+\t\t/* Qword 1 */\n+\t\t__le16 status_error0;\n+\t\t__le16 l2tag1;\n+\t\t__le16 flex_meta0;\n+\t\t__le16 flex_meta1;\n+\n+\t\t/* Qword 2 */\n+\t\t__le16 status_error1;\n+\t\tu8 flex_flags2;\n+\t\tu8 time_stamp_low;\n+\t\t__le16 l2tag2_1st;\n+\t\t__le16 l2tag2_2nd;\n+\n+\t\t/* Qword 3 */\n+\t\t__le16 flex_meta2;\n+\t\t__le16 flex_meta3;\n+\t\tunion {\n+\t\t\tstruct {\n+\t\t\t\t__le16 flex_meta4;\n+\t\t\t\t__le16 flex_meta5;\n+\t\t\t} flex;\n+\t\t\t__le32 ts_high;\n+\t\t} flex_ts;\n+\t} wb; /* writeback */\n+};\n+\n+/* Receive Flex Descriptor profile IDs: There are a total\n+ * of 64 profiles where profile IDs 0/1 are for legacy; and\n+ * profiles 2-63 are flex profiles that can be programmed\n+ * with a specific metadata (profile 7 reserved for HW)\n+ */\n+enum ice_rxdid {\n+\tICE_RXDID_START\t\t\t= 0,\n+\tICE_RXDID_LEGACY_0\t\t= ICE_RXDID_START,\n+\tICE_RXDID_LEGACY_1,\n+\tICE_RXDID_FLX_START,\n+\tICE_RXDID_FLEX_NIC\t\t= ICE_RXDID_FLX_START,\n+\tICE_RXDID_FLX_LAST\t\t= 63,\n+\tICE_RXDID_LAST\t\t\t= ICE_RXDID_FLX_LAST\n+};\n+\n+/* Receive Flex Descriptor Rx opcode values */\n+#define ICE_RX_OPC_MDID\t\t0x01\n+\n+/* Receive Descriptor MDID values */\n+#define ICE_RX_MDID_FLOW_ID_LOWER\t5\n+#define ICE_RX_MDID_FLOW_ID_HIGH\t6\n+#define ICE_RX_MDID_HASH_LOW\t\t56\n+#define ICE_RX_MDID_HASH_HIGH\t\t57\n+\n+/* Rx Flag64 packet flag bits */\n+enum ice_rx_flg64_bits {\n+\tICE_RXFLG_PKT_DSI\t= 0,\n+\tICE_RXFLG_EVLAN_x8100\t= 15,\n+\tICE_RXFLG_EVLAN_x9100,\n+\tICE_RXFLG_VLAN_x8100,\n+\tICE_RXFLG_TNL_MAC\t= 22,\n+\tICE_RXFLG_TNL_VLAN,\n+\tICE_RXFLG_PKT_FRG,\n+\tICE_RXFLG_FIN\t\t= 32,\n+\tICE_RXFLG_SYN,\n+\tICE_RXFLG_RST,\n+\tICE_RXFLG_TNL0\t\t= 
38,\n+\tICE_RXFLG_TNL1,\n+\tICE_RXFLG_TNL2,\n+\tICE_RXFLG_UDP_GRE,\n+\tICE_RXFLG_RSVD\t\t= 63\n+};\n+\n+#define ICE_RXQ_CTX_SIZE_DWORDS\t\t8\n+#define ICE_RXQ_CTX_SZ\t\t\t(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))\n+\n+/* RLAN Rx queue context data\n+ *\n+ * The sizes of the variables may be larger than needed due to crossing byte\n+ * boundaries. If we do not have the width of the variable set to the correct\n+ * size then we could end up shifting bits off the top of the variable when the\n+ * variable is at the top of a byte and crosses over into the next byte.\n+ */\n+struct ice_rlan_ctx {\n+\tu16 head;\n+\tu16 cpuid; /* bigger than needed, see above for reason */\n+\tu64 base;\n+\tu16 qlen;\n+#define ICE_RLAN_CTX_DBUF_S 7\n+\tu16 dbuf; /* bigger than needed, see above for reason */\n+#define ICE_RLAN_CTX_HBUF_S 6\n+\tu16 hbuf; /* bigger than needed, see above for reason */\n+\tu8  dtype;\n+\tu8  dsize;\n+\tu8  crcstrip;\n+\tu8  l2tsel;\n+\tu8  hsplit_0;\n+\tu8  hsplit_1;\n+\tu8  showiv;\n+\tu32 rxmax; /* bigger than needed, see above for reason */\n+\tu8  tphrdesc_ena;\n+\tu8  tphwdesc_ena;\n+\tu8  tphdata_ena;\n+\tu8  tphhead_ena;\n+\tu16 lrxqthresh; /* bigger than needed, see above for reason */\n+};\n+\n+struct ice_ctx_ele {\n+\tu16 offset;\n+\tu16 size_of;\n+\tu16 width;\n+\tu16 lsb;\n+};\n+\n+#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {\t\\\n+\t.offset = offsetof(struct _struct, _ele),\t\\\n+\t.size_of = FIELD_SIZEOF(struct _struct, _ele),\t\\\n+\t.width = _width,\t\t\t\t\\\n+\t.lsb = _lsb,\t\t\t\t\t\\\n+}\n+\n+/* for hsplit_0 field of Rx RLAN context */\n+enum ice_rlan_ctx_rx_hsplit_0 {\n+\tICE_RLAN_RX_HSPLIT_0_NO_SPLIT\t\t= 0,\n+\tICE_RLAN_RX_HSPLIT_0_SPLIT_L2\t\t= 1,\n+\tICE_RLAN_RX_HSPLIT_0_SPLIT_IP\t\t= 2,\n+\tICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP\t= 4,\n+\tICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP\t\t= 8,\n+};\n+\n+/* for hsplit_1 field of Rx RLAN context */\n+enum ice_rlan_ctx_rx_hsplit_1 {\n+\tICE_RLAN_RX_HSPLIT_1_NO_SPLIT\t\t= 0,\n+\tICE_RLAN_RX_HSPLIT_1_SPLIT_L2\t\t= 1,\n+\tICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS\t= 2,\n+};\n+\n+/* TX Descriptor */\n+struct ice_tx_desc {\n+\t__le64 buf_addr; /* Address of descriptor's data buf */\n+\t__le64 cmd_type_offset_bsz;\n+};\n+\n+#define ICE_LAN_TXQ_MAX_QGRPS\t127\n+#define ICE_LAN_TXQ_MAX_QDIS\t1023\n+\n+/* Tx queue context data\n+ *\n+ * The sizes of the variables may be larger than needed due to crossing byte\n+ * boundaries. 
If we do not have the width of the variable set to the correct\n+ * size then we could end up shifting bits off the top of the variable when the\n+ * variable is at the top of a byte and crosses over into the next byte.\n+ */\n+struct ice_tlan_ctx {\n+#define ICE_TLAN_CTX_BASE_S\t7\n+\tu64 base;\t\t/* base is defined in 128-byte units */\n+\tu8  port_num;\n+\tu16 cgd_num;\t\t/* bigger than needed, see above for reason */\n+\tu8  pf_num;\n+\tu16 vmvf_num;\n+\tu8  vmvf_type;\n+#define ICE_TLAN_CTX_VMVF_TYPE_VMQ\t1\n+#define ICE_TLAN_CTX_VMVF_TYPE_PF\t2\n+\tu16 src_vsi;\n+\tu8  tsyn_ena;\n+\tu8  alt_vlan;\n+\tu16 cpuid;\t\t/* bigger than needed, see above for reason */\n+\tu8  wb_mode;\n+\tu8  tphrd_desc;\n+\tu8  tphrd;\n+\tu8  tphwr_desc;\n+\tu16 cmpq_id;\n+\tu16 qnum_in_func;\n+\tu8  itr_notification_mode;\n+\tu8  adjust_prof_id;\n+\tu32 qlen;\t\t/* bigger than needed, see above for reason */\n+\tu8  quanta_prof_idx;\n+\tu8  tso_ena;\n+\tu16 tso_qnum;\n+\tu8  legacy_int;\n+\tu8  drop_ena;\n+\tu8  cache_prof_idx;\n+\tu8  pkt_shaper_prof_idx;\n+\tu8  int_q_state;\t/* width not needed - internal do not write */\n+};\n+#endif /* _ICE_LAN_TX_RX_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex ae13fe979397..afb400a1f1d2 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -41,6 +41,7 @@ MODULE_PARM_DESC(debug, \"netif level (0=none,...,16=all)\");\n #endif /* !CONFIG_DYNAMIC_DEBUG */\n \n static struct workqueue_struct *ice_wq;\n+static const struct net_device_ops ice_netdev_ops;\n \n static int ice_vsi_release(struct ice_vsi *vsi);\n \n@@ -227,6 +228,75 @@ static void ice_free_fltr_list(struct device *dev, struct list_head *h)\n \t}\n }\n \n+/**\n+ * ice_print_link_msg - print link up or down message\n+ * @vsi: the VSI whose link status is being queried\n+ * @isup: boolean for if the link is now up or down\n+ */\n+static void ice_print_link_msg(struct ice_vsi *vsi, bool isup)\n+{\n+\tconst char *speed;\n+\tconst char *fc;\n+\n+\tif (vsi->current_isup == isup)\n+\t\treturn;\n+\n+\tvsi->current_isup = isup;\n+\n+\tif (!isup) {\n+\t\tnetdev_info(vsi->netdev, \"NIC Link is Down\\n\");\n+\t\treturn;\n+\t}\n+\n+\tswitch (vsi->port_info->phy.link_info.link_speed) {\n+\tcase ICE_AQ_LINK_SPEED_40GB:\n+\t\tspeed = \"40 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_25GB:\n+\t\tspeed = \"25 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_20GB:\n+\t\tspeed = \"20 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_10GB:\n+\t\tspeed = \"10 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_5GB:\n+\t\tspeed = \"5 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_2500MB:\n+\t\tspeed = \"2.5 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_1000MB:\n+\t\tspeed = \"1 G\";\n+\t\tbreak;\n+\tcase ICE_AQ_LINK_SPEED_100MB:\n+\t\tspeed = \"100 M\";\n+\t\tbreak;\n+\tdefault:\n+\t\tspeed = \"Unknown\";\n+\t\tbreak;\n+\t}\n+\n+\tswitch (vsi->port_info->fc.current_mode) {\n+\tcase ICE_FC_FULL:\n+\t\tfc = \"RX/TX\";\n+\t\tbreak;\n+\tcase ICE_FC_TX_PAUSE:\n+\t\tfc = \"TX\";\n+\t\tbreak;\n+\tcase ICE_FC_RX_PAUSE:\n+\t\tfc = \"RX\";\n+\t\tbreak;\n+\tdefault:\n+\t\tfc = \"Unknown\";\n+\t\tbreak;\n+\t}\n+\n+\tnetdev_info(vsi->netdev, \"NIC Link is up %sbps, Flow Control: %s\\n\",\n+\t\t    speed, fc);\n+}\n+\n /**\n  * __ice_clean_ctrlq - helper function to clean controlq rings\n  * @pf: ptr to struct ice_pf\n@@ -422,6 +492,104 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)\n \thw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;\n }\n 
\n+/**\n+ * ice_irq_affinity_notify - Callback for affinity changes\n+ * @notify: context as to what irq was changed\n+ * @mask: the new affinity mask\n+ *\n+ * This is a callback function used by the irq_set_affinity_notifier function\n+ * so that we may register to receive changes to the irq affinity masks.\n+ */\n+static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,\n+\t\t\t\t    const cpumask_t *mask)\n+{\n+\tstruct ice_q_vector *q_vector =\n+\t\tcontainer_of(notify, struct ice_q_vector, affinity_notify);\n+\n+\tcpumask_copy(&q_vector->affinity_mask, mask);\n+}\n+\n+/**\n+ * ice_irq_affinity_release - Callback for affinity notifier release\n+ * @ref: internal core kernel usage\n+ *\n+ * This is a callback function used by the irq_set_affinity_notifier function\n+ * to inform the current notification subscriber that they will no longer\n+ * receive notifications.\n+ */\n+static void ice_irq_affinity_release(struct kref __always_unused *ref) {}\n+\n+/**\n+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI\n+ * @vsi: the VSI being un-configured\n+ */\n+static void ice_vsi_dis_irq(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint base = vsi->base_vector;\n+\tu32 val;\n+\tint i;\n+\n+\t/* disable interrupt causation from each queue */\n+\tif (vsi->tx_rings) {\n+\t\tice_for_each_txq(vsi, i) {\n+\t\t\tif (vsi->tx_rings[i]) {\n+\t\t\t\tu16 reg;\n+\n+\t\t\t\treg = vsi->tx_rings[i]->reg_idx;\n+\t\t\t\tval = rd32(hw, QINT_TQCTL(reg));\n+\t\t\t\tval &= ~QINT_TQCTL_CAUSE_ENA_M;\n+\t\t\t\twr32(hw, QINT_TQCTL(reg), val);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (vsi->rx_rings) {\n+\t\tice_for_each_rxq(vsi, i) {\n+\t\t\tif (vsi->rx_rings[i]) {\n+\t\t\t\tu16 reg;\n+\n+\t\t\t\treg = vsi->rx_rings[i]->reg_idx;\n+\t\t\t\tval = rd32(hw, QINT_RQCTL(reg));\n+\t\t\t\tval &= ~QINT_RQCTL_CAUSE_ENA_M;\n+\t\t\t\twr32(hw, QINT_RQCTL(reg), val);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* disable each interrupt */\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {\n+\t\tfor (i = vsi->base_vector;\n+\t\t     i < (vsi->num_q_vectors + vsi->base_vector); i++)\n+\t\t\twr32(hw, GLINT_DYN_CTL(i), 0);\n+\n+\t\tice_flush(hw);\n+\t\tfor (i = 0; i < vsi->num_q_vectors; i++)\n+\t\t\tsynchronize_irq(pf->msix_entries[i + base].vector);\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_ena_irq - Enable IRQ for the given VSI\n+ * @vsi: the VSI being configured\n+ */\n+static int ice_vsi_ena_irq(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {\n+\t\tint i;\n+\n+\t\tfor (i = 0; i < vsi->num_q_vectors; i++)\n+\t\t\tice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);\n+\t}\n+\n+\tice_flush(hw);\n+\treturn 0;\n+}\n+\n /**\n  * ice_vsi_delete - delete a VSI from the switch\n  * @vsi: pointer to VSI being removed\n@@ -442,6 +610,73 @@ static void ice_vsi_delete(struct ice_vsi *vsi)\n \t\t\tvsi->vsi_num);\n }\n \n+/**\n+ * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI\n+ * @vsi: the VSI being configured\n+ * @basename: name for the vector\n+ */\n+static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)\n+{\n+\tint q_vectors = vsi->num_q_vectors;\n+\tstruct ice_pf *pf = vsi->back;\n+\tint base = vsi->base_vector;\n+\tint rx_int_idx = 0;\n+\tint tx_int_idx = 0;\n+\tint vector, err;\n+\tint irq_num;\n+\n+\tfor (vector = 0; vector < q_vectors; vector++) {\n+\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[vector];\n+\n+\t\tirq_num = pf->msix_entries[base + 
vector].vector;\n+\n+\t\tif (q_vector->tx.ring && q_vector->rx.ring) {\n+\t\t\tsnprintf(q_vector->name, sizeof(q_vector->name) - 1,\n+\t\t\t\t \"%s-%s-%d\", basename, \"TxRx\", rx_int_idx++);\n+\t\t\ttx_int_idx++;\n+\t\t} else if (q_vector->rx.ring) {\n+\t\t\tsnprintf(q_vector->name, sizeof(q_vector->name) - 1,\n+\t\t\t\t \"%s-%s-%d\", basename, \"rx\", rx_int_idx++);\n+\t\t} else if (q_vector->tx.ring) {\n+\t\t\tsnprintf(q_vector->name, sizeof(q_vector->name) - 1,\n+\t\t\t\t \"%s-%s-%d\", basename, \"tx\", tx_int_idx++);\n+\t\t} else {\n+\t\t\t/* skip this unused q_vector */\n+\t\t\tcontinue;\n+\t\t}\n+\t\terr = devm_request_irq(&pf->pdev->dev,\n+\t\t\t\t       pf->msix_entries[base + vector].vector,\n+\t\t\t\t       vsi->irq_handler, 0, q_vector->name,\n+\t\t\t\t       q_vector);\n+\t\tif (err) {\n+\t\t\tnetdev_err(vsi->netdev,\n+\t\t\t\t   \"MSIX request_irq failed, error: %d\\n\", err);\n+\t\t\tgoto free_q_irqs;\n+\t\t}\n+\n+\t\t/* register for affinity change notifications */\n+\t\tq_vector->affinity_notify.notify = ice_irq_affinity_notify;\n+\t\tq_vector->affinity_notify.release = ice_irq_affinity_release;\n+\t\tirq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);\n+\n+\t\t/* assign the mask for this irq */\n+\t\tirq_set_affinity_hint(irq_num, &q_vector->affinity_mask);\n+\t}\n+\n+\tvsi->irqs_ready = true;\n+\treturn 0;\n+\n+free_q_irqs:\n+\twhile (vector) {\n+\t\tvector--;\n+\t\tirq_num = pf->msix_entries[base + vector].vector,\n+\t\tirq_set_affinity_notifier(irq_num, NULL);\n+\t\tirq_set_affinity_hint(irq_num, NULL);\n+\t\tdevm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]);\n+\t}\n+\treturn err;\n+}\n+\n /**\n  * ice_vsi_setup_q_map - Setup a VSI queue map\n  * @vsi: the VSI being configured\n@@ -604,6 +839,38 @@ static int ice_vsi_add(struct ice_vsi *vsi)\n \treturn ret;\n }\n \n+/**\n+ * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW\n+ * @vsi: the VSI being cleaned up\n+ */\n+static void ice_vsi_release_msix(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tu16 vector = vsi->base_vector;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 txq = 0;\n+\tu32 rxq = 0;\n+\tint i, q;\n+\n+\tfor (i = 0; i < vsi->num_q_vectors; i++, vector++) {\n+\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[i];\n+\n+\t\twr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);\n+\t\twr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);\n+\t\tfor (q = 0; q < q_vector->num_ring_tx; q++) {\n+\t\t\twr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);\n+\t\t\ttxq++;\n+\t\t}\n+\n+\t\tfor (q = 0; q < q_vector->num_ring_rx; q++) {\n+\t\t\twr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);\n+\t\t\trxq++;\n+\t\t}\n+\t}\n+\n+\tice_flush(hw);\n+}\n+\n /**\n  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI\n  * @vsi: the VSI having rings deallocated\n@@ -686,6 +953,118 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)\n \treturn -ENOMEM;\n }\n \n+/**\n+ * ice_vsi_free_irq - Free the irq association with the OS\n+ * @vsi: the VSI being configured\n+ */\n+static void ice_vsi_free_irq(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint base = vsi->base_vector;\n+\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {\n+\t\tint i;\n+\n+\t\tif (!vsi->q_vectors || !vsi->irqs_ready)\n+\t\t\treturn;\n+\n+\t\tvsi->irqs_ready = false;\n+\t\tfor (i = 0; i < vsi->num_q_vectors; i++) {\n+\t\t\tu16 vector = i + base;\n+\t\t\tint irq_num;\n+\n+\t\t\tirq_num = pf->msix_entries[vector].vector;\n+\n+\t\t\t/* free only the irqs that were actually requested */\n+\t\t\tif (!vsi->q_vectors[i] 
||\n+\t\t\t    !(vsi->q_vectors[i]->num_ring_tx ||\n+\t\t\t      vsi->q_vectors[i]->num_ring_rx))\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* clear the affinity notifier in the IRQ descriptor */\n+\t\t\tirq_set_affinity_notifier(irq_num, NULL);\n+\n+\t\t\t/* clear the affinity_mask in the IRQ descriptor */\n+\t\t\tirq_set_affinity_hint(irq_num, NULL);\n+\t\t\tsynchronize_irq(irq_num);\n+\t\t\tdevm_free_irq(&pf->pdev->dev, irq_num,\n+\t\t\t\t      vsi->q_vectors[i]);\n+\t\t}\n+\t\tice_vsi_release_msix(vsi);\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW\n+ * @vsi: the VSI being configured\n+ */\n+static void ice_vsi_cfg_msix(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tu16 vector = vsi->base_vector;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tu32 txq = 0, rxq = 0;\n+\tint i, q, itr;\n+\tu8 itr_gran;\n+\n+\tfor (i = 0; i < vsi->num_q_vectors; i++, vector++) {\n+\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[i];\n+\n+\t\titr_gran = hw->itr_gran_200;\n+\n+\t\tif (q_vector->num_ring_rx) {\n+\t\t\tq_vector->rx.itr =\n+\t\t\t\tITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,\n+\t\t\t\t\t   itr_gran);\n+\t\t\tq_vector->rx.latency_range = ICE_LOW_LATENCY;\n+\t\t}\n+\n+\t\tif (q_vector->num_ring_tx) {\n+\t\t\tq_vector->tx.itr =\n+\t\t\t\tITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,\n+\t\t\t\t\t   itr_gran);\n+\t\t\tq_vector->tx.latency_range = ICE_LOW_LATENCY;\n+\t\t}\n+\t\twr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);\n+\t\twr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);\n+\n+\t\t/* Both Transmit Queue Interrupt Cause Control register\n+\t\t * and Receive Queue Interrupt Cause control register\n+\t\t * expects MSIX_INDX field to be the vector index\n+\t\t * within the function space and not the absolute\n+\t\t * vector index across PF or across device.\n+\t\t * For SR-IOV VF VSIs queue vector index always starts\n+\t\t * with 1 since first vector index(0) is used for OICR\n+\t\t * in VF space. 
Since VMDq and other PF VSIs are withtin\n+\t\t * the PF function space, use the vector index thats\n+\t\t * tracked for this PF.\n+\t\t */\n+\t\tfor (q = 0; q < q_vector->num_ring_tx; q++) {\n+\t\t\tu32 val;\n+\n+\t\t\titr = ICE_TX_ITR;\n+\t\t\tval = QINT_TQCTL_CAUSE_ENA_M |\n+\t\t\t      (itr << QINT_TQCTL_ITR_INDX_S)  |\n+\t\t\t      (vector << QINT_TQCTL_MSIX_INDX_S);\n+\t\t\twr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);\n+\t\t\ttxq++;\n+\t\t}\n+\n+\t\tfor (q = 0; q < q_vector->num_ring_rx; q++) {\n+\t\t\tu32 val;\n+\n+\t\t\titr = ICE_RX_ITR;\n+\t\t\tval = QINT_RQCTL_CAUSE_ENA_M |\n+\t\t\t      (itr << QINT_RQCTL_ITR_INDX_S)  |\n+\t\t\t      (vector << QINT_RQCTL_MSIX_INDX_S);\n+\t\t\twr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);\n+\t\t\trxq++;\n+\t\t}\n+\t}\n+\n+\tice_flush(hw);\n+}\n+\n /**\n  * ice_ena_misc_vector - enable the non-queue interrupts\n  * @pf: board private structure\n@@ -766,7 +1145,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)\n \twr32(hw, PFINT_OICR_ENA, ena_mask);\n \tif (!test_bit(__ICE_DOWN, pf->state)) {\n \t\tice_service_task_schedule(pf);\n-\t\tice_irq_dynamic_ena(hw);\n+\t\tice_irq_dynamic_ena(hw, NULL, NULL);\n \t}\n \n \treturn ret;\n@@ -1031,7 +1410,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)\n \t     ITR_TO_REG(ICE_ITR_8K, itr_gran));\n \n \tice_flush(hw);\n-\tice_irq_dynamic_ena(hw);\n+\tice_irq_dynamic_ena(hw, NULL, NULL);\n \n \treturn 0;\n }\n@@ -1276,6 +1655,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)\n \n \tnetdev->priv_flags |= IFF_UNICAST_FLT;\n \n+\t/* assign netdev_ops */\n+\tnetdev->netdev_ops = &ice_netdev_ops;\n+\n \t/* setup watchdog timeout value to be 5 second */\n \tnetdev->watchdog_timeo = 5 * HZ;\n \n@@ -2093,6 +2475,704 @@ static void __exit ice_module_exit(void)\n }\n module_exit(ice_module_exit);\n \n+/**\n+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance\n+ * @ring: The Tx ring to configure\n+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized\n+ * @pf_q: queue index in the PF space\n+ *\n+ * Configure the Tx descriptor ring in TLAN context.\n+ */\n+static void\n+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)\n+{\n+\tstruct ice_vsi *vsi = ring->vsi;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\n+\ttlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;\n+\n+\ttlan_ctx->port_num = vsi->port_info->lport;\n+\n+\t/* Transmit Queue Length */\n+\ttlan_ctx->qlen = ring->count;\n+\n+\t/* PF number */\n+\ttlan_ctx->pf_num = hw->pf_id;\n+\n+\t/* queue belongs to a specific VSI type\n+\t * VF / VM index should be programmed per vmvf_type setting:\n+\t * for vmvf_type = VF, it is VF number between 0-256\n+\t * for vmvf_type = VM, it is VM number between 0-767\n+\t * for PF or EMP this field should be set to zero\n+\t */\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\ttlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn;\n+\t}\n+\n+\t/* make sure the context is associated with the right VSI */\n+\ttlan_ctx->src_vsi = vsi->vsi_num;\n+\n+\ttlan_ctx->tso_ena = ICE_TX_LEGACY;\n+\ttlan_ctx->tso_qnum = pf_q;\n+\n+\t/* Legacy or Advanced Host Interface:\n+\t * 0: Advanced Host Interface\n+\t * 1: Legacy Host Interface\n+\t */\n+\ttlan_ctx->legacy_int = ICE_TX_LEGACY;\n+}\n+\n+/**\n+ * ice_vsi_cfg_txqs - Configure the VSI for Tx\n+ * @vsi: the VSI being configured\n+ *\n+ * Return 0 on success and a negative value on error\n+ * Configure the Tx VSI for operation.\n+ */\n+static int 
ice_vsi_cfg_txqs(struct ice_vsi *vsi)\n+{\n+\tstruct ice_aqc_add_tx_qgrp *qg_buf;\n+\tstruct ice_aqc_add_txqs_perq *txq;\n+\tstruct ice_pf *pf = vsi->back;\n+\tenum ice_status status;\n+\tu16 buf_len, i, pf_q;\n+\tint err = 0, tc = 0;\n+\tu8 num_q_grps;\n+\n+\tbuf_len = sizeof(struct ice_aqc_add_tx_qgrp);\n+\tqg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);\n+\tif (!qg_buf)\n+\t\treturn -ENOMEM;\n+\n+\tif (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {\n+\t\terr = -EINVAL;\n+\t\tgoto err_cfg_txqs;\n+\t}\n+\tqg_buf->num_txqs = 1;\n+\tnum_q_grps = 1;\n+\n+\t/* set up and configure the tx queues */\n+\tice_for_each_txq(vsi, i) {\n+\t\tstruct ice_tlan_ctx tlan_ctx = { 0 };\n+\n+\t\tpf_q = vsi->txq_map[i];\n+\t\tice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);\n+\t\t/* copy context contents into the qg_buf */\n+\t\tqg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);\n+\t\tice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,\n+\t\t\t    ice_tlan_ctx_info);\n+\n+\t\t/* init queue specific tail reg. It is referred as transmit\n+\t\t * comm scheduler queue doorbell.\n+\t\t */\n+\t\tvsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);\n+\t\tstatus = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,\n+\t\t\t\t\t num_q_grps, qg_buf, buf_len, NULL);\n+\t\tif (status) {\n+\t\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\t\"Failed to set LAN Tx queue context, error: %d\\n\",\n+\t\t\t\tstatus);\n+\t\t\terr = -ENODEV;\n+\t\t\tgoto err_cfg_txqs;\n+\t\t}\n+\n+\t\t/* Add Tx Queue TEID into the VSI tx ring from the response\n+\t\t * This will complete configuring and enabling the queue.\n+\t\t */\n+\t\ttxq = &qg_buf->txqs[0];\n+\t\tif (pf_q == le16_to_cpu(txq->txq_id))\n+\t\t\tvsi->tx_rings[i]->txq_teid =\n+\t\t\t\tle32_to_cpu(txq->q_teid);\n+\t}\n+err_cfg_txqs:\n+\tdevm_kfree(&pf->pdev->dev, qg_buf);\n+\treturn err;\n+}\n+\n+/**\n+ * ice_setup_rx_ctx - Configure a receive ring context\n+ * @ring: The Rx ring to configure\n+ *\n+ * Configure the Rx descriptor ring in RLAN context.\n+ */\n+static int ice_setup_rx_ctx(struct ice_ring *ring)\n+{\n+\tstruct ice_vsi *vsi = ring->vsi;\n+\tstruct ice_hw *hw = &vsi->back->hw;\n+\tu32 rxdid = ICE_RXDID_FLEX_NIC;\n+\tstruct ice_rlan_ctx rlan_ctx;\n+\tu32 regval;\n+\tu16 pf_q;\n+\tint err;\n+\n+\t/* what is RX queue number in global space of 2K rx queues */\n+\tpf_q = vsi->rxq_map[ring->q_index];\n+\n+\t/* clear the context structure first */\n+\tmemset(&rlan_ctx, 0, sizeof(rlan_ctx));\n+\n+\trlan_ctx.base = ring->dma >> 7;\n+\n+\trlan_ctx.qlen = ring->count;\n+\n+\t/* Receive Packet Data Buffer Size.\n+\t * The Packet Data Buffer Size is defined in 128 byte units.\n+\t */\n+\trlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n+\n+\t/* use 32 byte descriptors */\n+\trlan_ctx.dsize = 1;\n+\n+\t/* Strip the Ethernet CRC bytes before the packet is posted to host\n+\t * memory.\n+\t */\n+\trlan_ctx.crcstrip = 1;\n+\n+\t/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */\n+\trlan_ctx.l2tsel = 1;\n+\n+\trlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;\n+\trlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;\n+\trlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;\n+\n+\t/* This controls whether VLAN is stripped from inner headers\n+\t * The VLAN in the inner L2 header is stripped to the receive\n+\t * descriptor if enabled by this flag.\n+\t */\n+\trlan_ctx.showiv = 0;\n+\n+\t/* Max packet size for this queue - must not be set to a larger value\n+\t * than 5 x DBUF\n+\t */\n+\trlan_ctx.rxmax = min_t(u16, vsi->max_frame,\n+\t\t\t       
ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);\n+\n+\t/* Rx queue threshold in units of 64 */\n+\trlan_ctx.lrxqthresh = 1;\n+\n+\t /* Enable Flexible Descriptors in the queue context which\n+\t  * allows this driver to select a specific receive descriptor format\n+\t  */\n+\tregval = rd32(hw, QRXFLXP_CNTXT(pf_q));\n+\tregval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &\n+\t\tQRXFLXP_CNTXT_RXDID_IDX_M;\n+\n+\t/* increasing context priority to pick up profile id;\n+\t * default is 0x01; setting to 0x03 to ensure profile\n+\t * is programming if prev context is of same priority\n+\t */\n+\tregval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n+\t\tQRXFLXP_CNTXT_RXDID_PRIO_M;\n+\n+\twr32(hw, QRXFLXP_CNTXT(pf_q), regval);\n+\n+\t/* Absolute queue number out of 2K needs to be passed */\n+\terr = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);\n+\tif (err) {\n+\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\\n\",\n+\t\t\tpf_q, err);\n+\t\treturn -EIO;\n+\t}\n+\n+\t/* init queue specific tail register */\n+\tring->tail = hw->hw_addr + QRX_TAIL(pf_q);\n+\twritel(0, ring->tail);\n+\tice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx\n+ * @vsi: the VSI being configured\n+ *\n+ * Return 0 on success and a negative value on error\n+ * Configure the Rx VSI for operation.\n+ */\n+static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)\n+{\n+\tint err = 0;\n+\tu16 i;\n+\n+\tif (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)\n+\t\tvsi->max_frame = vsi->netdev->mtu +\n+\t\t\tETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;\n+\telse\n+\t\tvsi->max_frame = ICE_RXBUF_2048;\n+\n+\tvsi->rx_buf_len = ICE_RXBUF_2048;\n+\t/* set up individual rings */\n+\tfor (i = 0; i < vsi->num_rxq && !err; i++)\n+\t\terr = ice_setup_rx_ctx(vsi->rx_rings[i]);\n+\n+\tif (err) {\n+\t\tdev_err(&vsi->back->pdev->dev, \"ice_setup_rx_ctx failed\\n\");\n+\t\treturn -EIO;\n+\t}\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_cfg - Setup the VSI\n+ * @vsi: the VSI being configured\n+ *\n+ * Return 0 on success and negative value on error\n+ */\n+static int ice_vsi_cfg(struct ice_vsi *vsi)\n+{\n+\tint err;\n+\n+\terr = ice_vsi_cfg_txqs(vsi);\n+\tif (!err)\n+\t\terr = ice_vsi_cfg_rxqs(vsi);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_stop_tx_rings - Disable Tx rings\n+ * @vsi: the VSI being configured\n+ */\n+static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tenum ice_status status;\n+\tu32 *q_teids, val;\n+\tu16 *q_ids, i;\n+\tint err = 0;\n+\n+\tif (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)\n+\t\treturn -EINVAL;\n+\n+\tq_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),\n+\t\t\t       GFP_KERNEL);\n+\tif (!q_teids)\n+\t\treturn -ENOMEM;\n+\n+\tq_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),\n+\t\t\t     GFP_KERNEL);\n+\tif (!q_ids) {\n+\t\terr = -ENOMEM;\n+\t\tgoto err_alloc_q_ids;\n+\t}\n+\n+\t/* set up the tx queue list to be disabled */\n+\tice_for_each_txq(vsi, i) {\n+\t\tu16 v_idx;\n+\n+\t\tif (!vsi->tx_rings || !vsi->tx_rings[i]) {\n+\t\t\terr = -EINVAL;\n+\t\t\tgoto err_out;\n+\t\t}\n+\n+\t\tq_ids[i] = vsi->txq_map[i];\n+\t\tq_teids[i] = vsi->tx_rings[i]->txq_teid;\n+\n+\t\t/* clear cause_ena bit for disabled queues */\n+\t\tval = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));\n+\t\tval &= ~QINT_TQCTL_CAUSE_ENA_M;\n+\t\twr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);\n+\n+\t\t/* software is expected to wait for 100 
ns */\n+\t\tndelay(100);\n+\n+\t\t/* trigger a software interrupt for the vector associated to\n+\t\t * the queue to schedule napi handler\n+\t\t */\n+\t\tv_idx = vsi->tx_rings[i]->q_vector->v_idx;\n+\t\twr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),\n+\t\t     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);\n+\t}\n+\tstatus = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,\n+\t\t\t\t NULL);\n+\tif (status) {\n+\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\"Failed to disable LAN Tx queues, error: %d\\n\",\n+\t\t\tstatus);\n+\t\terr = -ENODEV;\n+\t}\n+\n+err_out:\n+\tdevm_kfree(&pf->pdev->dev, q_ids);\n+\n+err_alloc_q_ids:\n+\tdevm_kfree(&pf->pdev->dev, q_teids);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled\n+ * @pf: the PF being configured\n+ * @pf_q: the PF queue\n+ * @ena: enable or disable state of the queue\n+ *\n+ * This routine will wait for the given Rx queue of the PF to reach the\n+ * enabled or disabled state.\n+ * Returns -ETIMEDOUT in case of failing to reach the requested state after\n+ * multiple retries; else will return 0 in case of success.\n+ */\n+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {\n+\t\tu32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));\n+\n+\t\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\tbreak;\n+\n+\t\tusleep_range(10, 20);\n+\t}\n+\tif (i >= ICE_Q_WAIT_RETRY_LIMIT)\n+\t\treturn -ETIMEDOUT;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings\n+ * @vsi: the VSI being configured\n+ * @ena: start or stop the rx rings\n+ */\n+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint i, j, ret = 0;\n+\n+\tfor (i = 0; i < vsi->num_rxq; i++) {\n+\t\tint pf_q = vsi->rxq_map[i];\n+\t\tu32 rx_reg;\n+\n+\t\tfor (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {\n+\t\t\trx_reg = rd32(hw, QRX_CTRL(pf_q));\n+\t\t\tif (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==\n+\t\t\t    ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))\n+\t\t\t\tbreak;\n+\t\t\tusleep_range(1000, 2000);\n+\t\t}\n+\n+\t\t/* Skip if the queue is already in the requested state */\n+\t\tif (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\tcontinue;\n+\n+\t\t/* turn on/off the queue */\n+\t\tif (ena)\n+\t\t\trx_reg |= QRX_CTRL_QENA_REQ_M;\n+\t\telse\n+\t\t\trx_reg &= ~QRX_CTRL_QENA_REQ_M;\n+\t\twr32(hw, QRX_CTRL(pf_q), rx_reg);\n+\n+\t\t/* wait for the change to finish */\n+\t\tret = ice_pf_rxq_wait(pf, pf_q, ena);\n+\t\tif (ret) {\n+\t\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\t\"VSI idx %d Rx ring %d %sable timeout\\n\",\n+\t\t\t\tvsi->idx, pf_q, (ena ? 
\"en\" : \"dis\"));\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_vsi_start_rx_rings - start VSI's rx rings\n+ * @vsi: the VSI whose rings are to be started\n+ *\n+ * Returns 0 on success and a negative value on error\n+ */\n+static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)\n+{\n+\treturn ice_vsi_ctrl_rx_rings(vsi, true);\n+}\n+\n+/**\n+ * ice_vsi_stop_rx_rings - stop VSI's rx rings\n+ * @vsi: the VSI\n+ *\n+ * Returns 0 on success and a negative value on error\n+ */\n+static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)\n+{\n+\treturn ice_vsi_ctrl_rx_rings(vsi, false);\n+}\n+\n+/**\n+ * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings\n+ * @vsi: the VSI\n+ * Returns 0 on success and a negative value on error\n+ */\n+static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)\n+{\n+\tint err_tx, err_rx;\n+\n+\terr_tx = ice_vsi_stop_tx_rings(vsi);\n+\tif (err_tx)\n+\t\tdev_dbg(&vsi->back->pdev->dev, \"Failed to disable Tx rings\\n\");\n+\n+\terr_rx = ice_vsi_stop_rx_rings(vsi);\n+\tif (err_rx)\n+\t\tdev_dbg(&vsi->back->pdev->dev, \"Failed to disable Rx rings\\n\");\n+\n+\tif (err_tx || err_rx)\n+\t\treturn -EIO;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_up_complete - Finish the last steps of bringing up a connection\n+ * @vsi: The VSI being configured\n+ *\n+ * Return 0 on success and negative value on error\n+ */\n+static int ice_up_complete(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint err;\n+\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))\n+\t\tice_vsi_cfg_msix(vsi);\n+\telse\n+\t\treturn -ENOTSUPP;\n+\n+\t/* Enable only Rx rings, Tx rings were enabled by the FW when the\n+\t * Tx queue group list was configured and the context bits were\n+\t * programmed using ice_vsi_cfg_txqs\n+\t */\n+\terr = ice_vsi_start_rx_rings(vsi);\n+\tif (err)\n+\t\treturn err;\n+\n+\tclear_bit(__ICE_DOWN, vsi->state);\n+\tice_vsi_ena_irq(vsi);\n+\n+\tif (vsi->port_info &&\n+\t    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&\n+\t    vsi->netdev) {\n+\t\tice_print_link_msg(vsi, true);\n+\t\tnetif_tx_start_all_queues(vsi->netdev);\n+\t\tnetif_carrier_on(vsi->netdev);\n+\t}\n+\n+\tice_service_task_schedule(pf);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_down - Shutdown the connection\n+ * @vsi: The VSI being stopped\n+ */\n+static int ice_down(struct ice_vsi *vsi)\n+{\n+\tint i, err;\n+\n+\t/* Caller of this function is expected to set the\n+\t * vsi->state __ICE_DOWN bit\n+\t */\n+\tif (vsi->netdev) {\n+\t\tnetif_carrier_off(vsi->netdev);\n+\t\tnetif_tx_disable(vsi->netdev);\n+\t}\n+\n+\tice_vsi_dis_irq(vsi);\n+\terr = ice_vsi_stop_tx_rx_rings(vsi);\n+\n+\tice_for_each_txq(vsi, i)\n+\t\tice_clean_tx_ring(vsi->tx_rings[i]);\n+\n+\tice_for_each_rxq(vsi, i)\n+\t\tice_clean_rx_ring(vsi->rx_rings[i]);\n+\n+\tif (err)\n+\t\tnetdev_err(vsi->netdev, \"Failed to close VSI 0x%04X on switch 0x%04X\\n\",\n+\t\t\t   vsi->vsi_num, vsi->vsw->sw_id);\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources\n+ * @vsi: VSI having resources allocated\n+ *\n+ * Return 0 on success, negative on failure\n+ */\n+static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)\n+{\n+\tint i, err;\n+\n+\tif (!vsi->num_txq) {\n+\t\tdev_err(&vsi->back->pdev->dev, \"VSI %d has 0 Tx queues\\n\",\n+\t\t\tvsi->vsi_num);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tice_for_each_txq(vsi, i) {\n+\t\terr = ice_setup_tx_ring(vsi->tx_rings[i]);\n+\t\tif (err)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_setup_rx_rings - Allocate VSI Rx queue 
resources\n+ * @vsi: VSI having resources allocated\n+ *\n+ * Return 0 on success, negative on failure\n+ */\n+static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)\n+{\n+\tint i, err;\n+\n+\tif (!vsi->num_rxq) {\n+\t\tdev_err(&vsi->back->pdev->dev, \"VSI %d has 0 Rx queues\\n\",\n+\t\t\tvsi->vsi_num);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tice_for_each_rxq(vsi, i) {\n+\t\terr = ice_setup_rx_ring(vsi->rx_rings[i]);\n+\t\tif (err)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_req_irq - Request IRQ from the OS\n+ * @vsi: The VSI IRQ is being requested for\n+ * @basename: name for the vector\n+ *\n+ * Return 0 on success and a negative value on error\n+ */\n+static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint err = -EINVAL;\n+\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))\n+\t\terr = ice_vsi_req_irq_msix(vsi, basename);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues\n+ * @vsi: the VSI having resources freed\n+ */\n+static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)\n+{\n+\tint i;\n+\n+\tif (!vsi->tx_rings)\n+\t\treturn;\n+\n+\tice_for_each_txq(vsi, i)\n+\t\tif (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)\n+\t\t\tice_free_tx_ring(vsi->tx_rings[i]);\n+}\n+\n+/**\n+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues\n+ * @vsi: the VSI having resources freed\n+ */\n+static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)\n+{\n+\tint i;\n+\n+\tif (!vsi->rx_rings)\n+\t\treturn;\n+\n+\tice_for_each_rxq(vsi, i)\n+\t\tif (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)\n+\t\t\tice_free_rx_ring(vsi->rx_rings[i]);\n+}\n+\n+/**\n+ * ice_vsi_open - Called when a network interface is made active\n+ * @vsi: the VSI to open\n+ *\n+ * Initialization of the VSI\n+ *\n+ * Returns 0 on success, negative value on error\n+ */\n+static int ice_vsi_open(struct ice_vsi *vsi)\n+{\n+\tchar int_name[ICE_INT_NAME_STR_LEN];\n+\tstruct ice_pf *pf = vsi->back;\n+\tint err;\n+\n+\t/* allocate descriptors */\n+\terr = ice_vsi_setup_tx_rings(vsi);\n+\tif (err)\n+\t\tgoto err_setup_tx;\n+\n+\terr = ice_vsi_setup_rx_rings(vsi);\n+\tif (err)\n+\t\tgoto err_setup_rx;\n+\n+\terr = ice_vsi_cfg(vsi);\n+\tif (err)\n+\t\tgoto err_setup_rx;\n+\n+\tsnprintf(int_name, sizeof(int_name) - 1, \"%s-%s\",\n+\t\t dev_driver_string(&pf->pdev->dev), vsi->netdev->name);\n+\terr = ice_vsi_req_irq(vsi, int_name);\n+\tif (err)\n+\t\tgoto err_setup_rx;\n+\n+\t/* Notify the stack of the actual queue counts. 
*/\n+\terr = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);\n+\tif (err)\n+\t\tgoto err_set_qs;\n+\n+\terr = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);\n+\tif (err)\n+\t\tgoto err_set_qs;\n+\n+\terr = ice_up_complete(vsi);\n+\tif (err)\n+\t\tgoto err_up_complete;\n+\n+\treturn 0;\n+\n+err_up_complete:\n+\tice_down(vsi);\n+err_set_qs:\n+\tice_vsi_free_irq(vsi);\n+err_setup_rx:\n+\tice_vsi_free_rx_rings(vsi);\n+err_setup_tx:\n+\tice_vsi_free_tx_rings(vsi);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_close - Shut down a VSI\n+ * @vsi: the VSI being shut down\n+ */\n+static void ice_vsi_close(struct ice_vsi *vsi)\n+{\n+\tif (!test_and_set_bit(__ICE_DOWN, vsi->state))\n+\t\tice_down(vsi);\n+\n+\tice_vsi_free_irq(vsi);\n+\tice_vsi_free_tx_rings(vsi);\n+\tice_vsi_free_rx_rings(vsi);\n+}\n+\n /**\n  * ice_vsi_release - Delete a VSI and free its resources\n  * @vsi: the VSI being removed\n@@ -2113,6 +3193,9 @@ static int ice_vsi_release(struct ice_vsi *vsi)\n \t\tvsi->netdev = NULL;\n \t}\n \n+\tice_vsi_dis_irq(vsi);\n+\tice_vsi_close(vsi);\n+\n \t/* reclaim interrupt vectors back to PF */\n \tice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);\n \tpf->num_avail_msix += vsi->num_q_vectors;\n@@ -2130,3 +3213,56 @@ static int ice_vsi_release(struct ice_vsi *vsi)\n \n \treturn 0;\n }\n+\n+/**\n+ * ice_open - Called when a network interface becomes active\n+ * @netdev: network interface device structure\n+ *\n+ * The open entry point is called when a network interface is made\n+ * active by the system (IFF_UP).  At this point all resources needed\n+ * for transmit and receive operations are allocated, the interrupt\n+ * handler is registered with the OS, the netdev watchdog is enabled,\n+ * and the stack is notified that the interface is ready.\n+ *\n+ * Returns 0 on success, negative value on failure\n+ */\n+static int ice_open(struct net_device *netdev)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tint err;\n+\n+\tnetif_carrier_off(netdev);\n+\n+\terr = ice_vsi_open(vsi);\n+\n+\tif (err)\n+\t\tnetdev_err(netdev, \"Failed to open VSI 0x%04X on switch 0x%04X\\n\",\n+\t\t\t   vsi->vsi_num, vsi->vsw->sw_id);\n+\treturn err;\n+}\n+\n+/**\n+ * ice_stop - Disables a network interface\n+ * @netdev: network interface device structure\n+ *\n+ * The stop entry point is called when an interface is de-activated by the OS,\n+ * and the netdevice enters the DOWN state.  
The hardware is still under the\n+ * driver's control, but the netdev interface is disabled.\n+ *\n+ * Returns success only - not allowed to fail\n+ */\n+static int ice_stop(struct net_device *netdev)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\n+\tice_vsi_close(vsi);\n+\n+\treturn 0;\n+}\n+\n+static const struct net_device_ops ice_netdev_ops = {\n+\t.ndo_open = ice_open,\n+\t.ndo_stop = ice_stop,\n+};\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c\nindex 0a547141b125..74dbdb3d5df2 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.c\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.c\n@@ -476,6 +476,18 @@ void ice_sched_cleanup_all(struct ice_hw *hw)\n \thw->max_cgds = 0;\n }\n \n+/**\n+ * ice_sched_get_qgrp_layer - get the current queue group layer number\n+ * @hw: pointer to the hw struct\n+ *\n+ * This function returns the current queue group layer number\n+ */\n+static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)\n+{\n+\t/* It's always total layers - 1, the array is 0 relative so -2 */\n+\treturn hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;\n+}\n+\n /**\n  * ice_rm_dflt_leaf_node - remove the default leaf node in the tree\n  * @pi: port information structure\n@@ -680,3 +692,96 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)\n \tdevm_kfree(ice_hw_to_dev(hw), buf);\n \treturn status;\n }\n+\n+/**\n+ * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id\n+ * @pi: port information structure\n+ * @vsi_id: vsi id\n+ *\n+ * This function retrieves the vsi list for the given vsi id\n+ */\n+static struct ice_sched_vsi_info *\n+ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)\n+{\n+\tstruct ice_sched_vsi_info *list_elem;\n+\n+\tif (!pi)\n+\t\treturn NULL;\n+\n+\tlist_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)\n+\t\tif (list_elem->vsi_id == vsi_id)\n+\t\t\treturn list_elem;\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_sched_find_node_in_subtree - Find node in part of base node subtree\n+ * @hw: pointer to the hw struct\n+ * @base: pointer to the base node\n+ * @node: pointer to the node to search\n+ *\n+ * This function checks whether a given node is part of the base node\n+ * subtree or not\n+ */\n+static bool\n+ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,\n+\t\t\t       struct ice_sched_node *node)\n+{\n+\tu8 i;\n+\n+\tfor (i = 0; i < base->num_children; i++) {\n+\t\tstruct ice_sched_node *child = base->children[i];\n+\n+\t\tif (node == child)\n+\t\t\treturn true;\n+\t\tif (child->tx_sched_layer > node->tx_sched_layer)\n+\t\t\treturn false;\n+\t\t/* this recursion is intentional, and wouldn't\n+\t\t * go more than 8 calls\n+\t\t */\n+\t\tif (ice_sched_find_node_in_subtree(hw, child, node))\n+\t\t\treturn true;\n+\t}\n+\treturn false;\n+}\n+\n+/**\n+ * ice_sched_get_free_qparent - Get a free lan or rdma q group node\n+ * @pi: port information structure\n+ * @vsi_id: vsi id\n+ * @tc: branch number\n+ * @owner: lan or rdma\n+ *\n+ * This function retrieves a free lan or rdma q group node\n+ */\n+struct ice_sched_node *\n+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,\n+\t\t\t   u8 owner)\n+{\n+\tstruct ice_sched_node *vsi_node, *qgrp_node = NULL;\n+\tstruct ice_sched_vsi_info *list_elem;\n+\tu16 max_children;\n+\tu8 qgrp_layer;\n+\n+\tqgrp_layer = ice_sched_get_qgrp_layer(pi->hw);\n+\tmax_children = 
le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);\n+\tlist_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);\n+\tif (!list_elem)\n+\t\tgoto lan_q_exit;\n+\tvsi_node = list_elem->vsi_node[tc];\n+\t/* validate invalid VSI id */\n+\tif (!vsi_node)\n+\t\tgoto lan_q_exit;\n+\t/* get the first q group node from VSI sub-tree */\n+\tqgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);\n+\twhile (qgrp_node) {\n+\t\t/* make sure the qgroup node is part of the VSI subtree */\n+\t\tif (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))\n+\t\t\tif (qgrp_node->num_children < max_children &&\n+\t\t\t    qgrp_node->owner == owner)\n+\t\t\t\tbreak;\n+\t\tqgrp_node = qgrp_node->sibling;\n+\t}\n+lan_q_exit:\n+\treturn qgrp_node;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h\nindex 6a9c0ae4220d..a3a9fc14603a 100644\n--- a/drivers/net/ethernet/intel/ice/ice_sched.h\n+++ b/drivers/net/ethernet/intel/ice/ice_sched.h\n@@ -20,6 +20,8 @@\n \n #include \"ice_common.h\"\n \n+#define ICE_QGRP_LAYER_OFFSET\t2\n+\n struct ice_sched_agg_vsi_info {\n \tstruct list_head list_entry;\n \tDECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);\n@@ -45,4 +47,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,\n \t\t   struct ice_aqc_txsched_elem_data *info);\n void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);\n struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);\n+struct ice_sched_node *\n+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,\n+\t\t\t   u8 owner);\n #endif /* _ICE_SCHED_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h\nindex 7367392258a5..9c33c95a4045 100644\n--- a/drivers/net/ethernet/intel/ice/ice_status.h\n+++ b/drivers/net/ethernet/intel/ice/ice_status.h\n@@ -23,6 +23,7 @@ enum ice_status {\n \tICE_ERR_PARAM\t\t\t\t= -1,\n \tICE_ERR_NOT_IMPL\t\t\t= -2,\n \tICE_ERR_NOT_READY\t\t\t= -3,\n+\tICE_ERR_BAD_PTR\t\t\t\t= -5,\n \tICE_ERR_INVAL_SIZE\t\t\t= -6,\n \tICE_ERR_DEVICE_NOT_SUPPORTED\t\t= -8,\n \tICE_ERR_RESET_FAILED\t\t\t= -9,\n@@ -32,6 +33,7 @@ enum ice_status {\n \tICE_ERR_OUT_OF_RANGE\t\t\t= -13,\n \tICE_ERR_ALREADY_EXISTS\t\t\t= -14,\n \tICE_ERR_DOES_NOT_EXIST\t\t\t= -15,\n+\tICE_ERR_MAX_LIMIT\t\t\t= -17,\n \tICE_ERR_BUF_TOO_SHORT\t\t\t= -52,\n \tICE_ERR_NVM_BLANK_MODE\t\t\t= -53,\n \tICE_ERR_AQ_ERROR\t\t\t= -100,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c\nnew file mode 100644\nindex 000000000000..002c26a4bca6\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c\n@@ -0,0 +1,375 @@\n+// SPDX-License-Identifier: GPL-2.0-only\n+/* Intel(R) Ethernet Connection E800 Series Linux Driver\n+ * Copyright (c) 2018, Intel Corporation.\n+ *\n+ * This program is free software; you can redistribute it and/or modify it\n+ * under the terms and conditions of the GNU General Public License,\n+ * version 2, as published by the Free Software Foundation.\n+ *\n+ * This program is distributed in the hope it will be useful, but WITHOUT\n+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n+ * FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for\n+ * more details.\n+ *\n+ * The full GNU General Public License is included in this distribution in\n+ * the file called \"COPYING\".\n+ */\n+\n+/* The driver transmit and receive code */\n+\n+#include <linux/prefetch.h>\n+#include <linux/mm.h>\n+#include \"ice.h\"\n+\n+/**\n+ * ice_unmap_and_free_tx_buf - Release a Tx buffer\n+ * @ring: the ring that owns the buffer\n+ * @tx_buf: the buffer to free\n+ */\n+static void\n+ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)\n+{\n+\tif (tx_buf->skb) {\n+\t\tdev_kfree_skb_any(tx_buf->skb);\n+\t\tif (dma_unmap_len(tx_buf, len))\n+\t\t\tdma_unmap_single(ring->dev,\n+\t\t\t\t\t dma_unmap_addr(tx_buf, dma),\n+\t\t\t\t\t dma_unmap_len(tx_buf, len),\n+\t\t\t\t\t DMA_TO_DEVICE);\n+\t} else if (dma_unmap_len(tx_buf, len)) {\n+\t\tdma_unmap_page(ring->dev,\n+\t\t\t       dma_unmap_addr(tx_buf, dma),\n+\t\t\t       dma_unmap_len(tx_buf, len),\n+\t\t\t       DMA_TO_DEVICE);\n+\t}\n+\n+\ttx_buf->next_to_watch = NULL;\n+\ttx_buf->skb = NULL;\n+\tdma_unmap_len_set(tx_buf, len, 0);\n+\t/* tx_buf must be completely set up in the transmit path */\n+}\n+\n+static struct netdev_queue *txring_txq(const struct ice_ring *ring)\n+{\n+\treturn netdev_get_tx_queue(ring->netdev, ring->q_index);\n+}\n+\n+/**\n+ * ice_clean_tx_ring - Free any empty Tx buffers\n+ * @tx_ring: ring to be cleaned\n+ */\n+void ice_clean_tx_ring(struct ice_ring *tx_ring)\n+{\n+\tunsigned long size;\n+\tu16 i;\n+\n+\t/* ring already cleared, nothing to do */\n+\tif (!tx_ring->tx_buf)\n+\t\treturn;\n+\n+\t/* Free all the Tx ring sk_bufss */\n+\tfor (i = 0; i < tx_ring->count; i++)\n+\t\tice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);\n+\n+\tsize = sizeof(struct ice_tx_buf) * tx_ring->count;\n+\tmemset(tx_ring->tx_buf, 0, size);\n+\n+\t/* Zero out the descriptor ring */\n+\tmemset(tx_ring->desc, 0, tx_ring->size);\n+\n+\ttx_ring->next_to_use = 0;\n+\ttx_ring->next_to_clean = 0;\n+\n+\tif (!tx_ring->netdev)\n+\t\treturn;\n+\n+\t/* cleanup Tx queue statistics */\n+\tnetdev_tx_reset_queue(txring_txq(tx_ring));\n+}\n+\n+/**\n+ * ice_free_tx_ring - Free Tx resources per queue\n+ * @tx_ring: Tx descriptor ring for a specific queue\n+ *\n+ * Free all transmit software resources\n+ */\n+void ice_free_tx_ring(struct ice_ring *tx_ring)\n+{\n+\tice_clean_tx_ring(tx_ring);\n+\tdevm_kfree(tx_ring->dev, tx_ring->tx_buf);\n+\ttx_ring->tx_buf = NULL;\n+\n+\tif (tx_ring->desc) {\n+\t\tdmam_free_coherent(tx_ring->dev, tx_ring->size,\n+\t\t\t\t   tx_ring->desc, tx_ring->dma);\n+\t\ttx_ring->desc = NULL;\n+\t}\n+}\n+\n+/**\n+ * ice_setup_tx_ring - Allocate the Tx descriptors\n+ * @tx_ring: the tx ring to set up\n+ *\n+ * Return 0 on success, negative on error\n+ */\n+int ice_setup_tx_ring(struct ice_ring *tx_ring)\n+{\n+\tstruct device *dev = tx_ring->dev;\n+\tint bi_size;\n+\n+\tif (!dev)\n+\t\treturn -ENOMEM;\n+\n+\t/* warn if we are about to overwrite the pointer */\n+\tWARN_ON(tx_ring->tx_buf);\n+\tbi_size = sizeof(struct ice_tx_buf) * tx_ring->count;\n+\ttx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);\n+\tif (!tx_ring->tx_buf)\n+\t\treturn -ENOMEM;\n+\n+\t/* round up to nearest 4K */\n+\ttx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);\n+\ttx_ring->size = ALIGN(tx_ring->size, 4096);\n+\ttx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,\n+\t\t\t\t\t    GFP_KERNEL);\n+\tif (!tx_ring->desc) {\n+\t\tdev_err(dev, \"Unable to allocate memory for the Tx descriptor ring, 
size=%d\\n\",\n+\t\t\ttx_ring->size);\n+\t\tgoto err;\n+\t}\n+\n+\ttx_ring->next_to_use = 0;\n+\ttx_ring->next_to_clean = 0;\n+\treturn 0;\n+\n+err:\n+\tdevm_kfree(dev, tx_ring->tx_buf);\n+\ttx_ring->tx_buf = NULL;\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * ice_clean_rx_ring - Free Rx buffers\n+ * @rx_ring: ring to be cleaned\n+ */\n+void ice_clean_rx_ring(struct ice_ring *rx_ring)\n+{\n+\tstruct device *dev = rx_ring->dev;\n+\tunsigned long size;\n+\tu16 i;\n+\n+\t/* ring already cleared, nothing to do */\n+\tif (!rx_ring->rx_buf)\n+\t\treturn;\n+\n+\t/* Free all the Rx ring sk_buffs */\n+\tfor (i = 0; i < rx_ring->count; i++) {\n+\t\tstruct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];\n+\n+\t\tif (rx_buf->skb) {\n+\t\t\tdev_kfree_skb(rx_buf->skb);\n+\t\t\trx_buf->skb = NULL;\n+\t\t}\n+\t\tif (!rx_buf->page)\n+\t\t\tcontinue;\n+\n+\t\tdma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);\n+\t\t__free_pages(rx_buf->page, 0);\n+\n+\t\trx_buf->page = NULL;\n+\t\trx_buf->page_offset = 0;\n+\t}\n+\n+\tsize = sizeof(struct ice_rx_buf) * rx_ring->count;\n+\tmemset(rx_ring->rx_buf, 0, size);\n+\n+\t/* Zero out the descriptor ring */\n+\tmemset(rx_ring->desc, 0, rx_ring->size);\n+\n+\trx_ring->next_to_alloc = 0;\n+\trx_ring->next_to_clean = 0;\n+\trx_ring->next_to_use = 0;\n+}\n+\n+/**\n+ * ice_free_rx_ring - Free Rx resources\n+ * @rx_ring: ring to clean the resources from\n+ *\n+ * Free all receive software resources\n+ */\n+void ice_free_rx_ring(struct ice_ring *rx_ring)\n+{\n+\tice_clean_rx_ring(rx_ring);\n+\tdevm_kfree(rx_ring->dev, rx_ring->rx_buf);\n+\trx_ring->rx_buf = NULL;\n+\n+\tif (rx_ring->desc) {\n+\t\tdmam_free_coherent(rx_ring->dev, rx_ring->size,\n+\t\t\t\t   rx_ring->desc, rx_ring->dma);\n+\t\trx_ring->desc = NULL;\n+\t}\n+}\n+\n+/**\n+ * ice_setup_rx_ring - Allocate the Rx descriptors\n+ * @rx_ring: the rx ring to set up\n+ *\n+ * Return 0 on success, negative on error\n+ */\n+int ice_setup_rx_ring(struct ice_ring *rx_ring)\n+{\n+\tstruct device *dev = rx_ring->dev;\n+\tint bi_size;\n+\n+\tif (!dev)\n+\t\treturn -ENOMEM;\n+\n+\t/* warn if we are about to overwrite the pointer */\n+\tWARN_ON(rx_ring->rx_buf);\n+\tbi_size = sizeof(struct ice_rx_buf) * rx_ring->count;\n+\trx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);\n+\tif (!rx_ring->rx_buf)\n+\t\treturn -ENOMEM;\n+\n+\t/* round up to nearest 4K */\n+\trx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);\n+\trx_ring->size = ALIGN(rx_ring->size, 4096);\n+\trx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,\n+\t\t\t\t\t    GFP_KERNEL);\n+\tif (!rx_ring->desc) {\n+\t\tdev_err(dev, \"Unable to allocate memory for the Rx descriptor ring, size=%d\\n\",\n+\t\t\trx_ring->size);\n+\t\tgoto err;\n+\t}\n+\n+\trx_ring->next_to_use = 0;\n+\trx_ring->next_to_clean = 0;\n+\treturn 0;\n+\n+err:\n+\tdevm_kfree(dev, rx_ring->rx_buf);\n+\trx_ring->rx_buf = NULL;\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * ice_release_rx_desc - Store the new tail and head values\n+ * @rx_ring: ring to bump\n+ * @val: new head index\n+ */\n+static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)\n+{\n+\trx_ring->next_to_use = val;\n+\n+\t/* update next to alloc since we have filled the ring */\n+\trx_ring->next_to_alloc = val;\n+\n+\t/* Force memory writes to complete before letting h/w\n+\t * know there are new descriptors to fetch.  
(Only\n+\t * applicable for weak-ordered memory model archs,\n+\t * such as IA-64).\n+\t */\n+\twmb();\n+\twritel(val, rx_ring->tail);\n+}\n+\n+/**\n+ * ice_alloc_mapped_page - recycle or make a new page\n+ * @rx_ring: ring to use\n+ * @bi: rx_buf struct to modify\n+ *\n+ * Returns true if the page was successfully allocated or\n+ * reused.\n+ */\n+static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,\n+\t\t\t\t  struct ice_rx_buf *bi)\n+{\n+\tstruct page *page = bi->page;\n+\tdma_addr_t dma;\n+\n+\t/* since we are recycling buffers we should seldom need to alloc */\n+\tif (likely(page))\n+\t\treturn true;\n+\n+\t/* alloc new page for storage */\n+\tpage = alloc_page(GFP_ATOMIC | __GFP_NOWARN);\n+\tif (unlikely(!page))\n+\t\treturn false;\n+\n+\t/* map page for use */\n+\tdma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);\n+\n+\t/* if mapping failed free memory back to system since\n+\t * there isn't much point in holding memory we can't use\n+\t */\n+\tif (dma_mapping_error(rx_ring->dev, dma)) {\n+\t\t__free_pages(page, 0);\n+\t\treturn false;\n+\t}\n+\n+\tbi->dma = dma;\n+\tbi->page = page;\n+\tbi->page_offset = 0;\n+\n+\treturn true;\n+}\n+\n+/**\n+ * ice_alloc_rx_bufs - Replace used receive buffers\n+ * @rx_ring: ring to place buffers on\n+ * @cleaned_count: number of buffers to replace\n+ *\n+ * Returns false if all allocations were successful, true if any fail\n+ */\n+bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)\n+{\n+\tunion ice_32b_rx_flex_desc *rx_desc;\n+\tu16 ntu = rx_ring->next_to_use;\n+\tstruct ice_rx_buf *bi;\n+\n+\t/* do nothing if no valid netdev defined */\n+\tif (!rx_ring->netdev || !cleaned_count)\n+\t\treturn false;\n+\n+\t/* get the RX descriptor and buffer based on next_to_use */\n+\trx_desc = ICE_RX_DESC(rx_ring, ntu);\n+\tbi = &rx_ring->rx_buf[ntu];\n+\n+\tdo {\n+\t\tif (!ice_alloc_mapped_page(rx_ring, bi))\n+\t\t\tgoto no_bufs;\n+\n+\t\t/* Refresh the desc even if buffer_addrs didn't change\n+\t\t * because each write-back erases this info.\n+\t\t */\n+\t\trx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);\n+\n+\t\trx_desc++;\n+\t\tbi++;\n+\t\tntu++;\n+\t\tif (unlikely(ntu == rx_ring->count)) {\n+\t\t\trx_desc = ICE_RX_DESC(rx_ring, 0);\n+\t\t\tbi = rx_ring->rx_buf;\n+\t\t\tntu = 0;\n+\t\t}\n+\n+\t\t/* clear the status bits for the next_to_use descriptor */\n+\t\trx_desc->wb.status_error0 = 0;\n+\n+\t\tcleaned_count--;\n+\t} while (cleaned_count);\n+\n+\tif (rx_ring->next_to_use != ntu)\n+\t\tice_release_rx_desc(rx_ring, ntu);\n+\n+\treturn false;\n+\n+no_bufs:\n+\tif (rx_ring->next_to_use != ntu)\n+\t\tice_release_rx_desc(rx_ring, ntu);\n+\n+\t/* make sure to come back via polling to try again after\n+\t * allocation failure\n+\t */\n+\treturn true;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex b5ec4fb88aa8..367bfc6fa485 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -19,6 +19,30 @@\n #define _ICE_TXRX_H_\n \n #define ICE_DFLT_IRQ_WORK\t256\n+#define ICE_RXBUF_2048\t\t2048\n+#define ICE_MAX_CHAINED_RX_BUFS\t5\n+#define ICE_MAX_TXQ_PER_TXQG\t128\n+\n+#define ICE_DESC_UNUSED(R)\t\\\n+\t((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \\\n+\t(R)->next_to_clean - (R)->next_to_use - 1)\n+\n+struct ice_tx_buf {\n+\tstruct ice_tx_desc *next_to_watch;\n+\tstruct sk_buff *skb;\n+\tunsigned int bytecount;\n+\tunsigned short gso_segs;\n+\tu32 tx_flags;\n+\tDEFINE_DMA_UNMAP_ADDR(dma);\n+\tDEFINE_DMA_UNMAP_LEN(len);\n+};\n+\n+struct ice_rx_buf {\n+\tstruct sk_buff *skb;\n+\tdma_addr_t dma;\n+\tstruct page *page;\n+\tunsigned int page_offset;\n+};\n \n /* this enum matches hardware bits and is meant to be used by DYN_CTLN\n  * registers and QINT registers or more generally anywhere in the manual\n@@ -32,33 +56,77 @@ enum ice_dyn_idx_t {\n \tICE_ITR_NONE = 3\t/* ITR_NONE must not be used as an index */\n };\n \n+/* Header split modes defined by DTYPE field of Rx RLAN context */\n+enum ice_rx_dtype {\n+\tICE_RX_DTYPE_NO_SPLIT\t\t= 0,\n+\tICE_RX_DTYPE_HEADER_SPLIT\t= 1,\n+\tICE_RX_DTYPE_SPLIT_ALWAYS\t= 2,\n+};\n+\n /* indices into GLINT_ITR registers */\n #define ICE_RX_ITR\tICE_IDX_ITR0\n+#define ICE_TX_ITR\tICE_IDX_ITR1\n #define ICE_ITR_DYNAMIC\t0x8000  /* use top bit as a flag */\n #define ICE_ITR_8K\t0x003E\n \n /* apply ITR HW granularity translation to program the HW registers */\n #define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))\n \n+/* Legacy or Advanced Mode Queue */\n+#define ICE_TX_ADVANCED\t0\n+#define ICE_TX_LEGACY\t1\n+\n /* descriptor ring, associated with a VSI */\n struct ice_ring {\n \tstruct ice_ring *next;\t\t/* pointer to next ring in q_vector */\n+\tvoid *desc;\t\t\t/* Descriptor ring memory */\n \tstruct device *dev;\t\t/* Used for DMA mapping */\n \tstruct net_device *netdev;\t/* netdev ring maps to */\n \tstruct ice_vsi *vsi;\t\t/* Backreference to associated VSI */\n \tstruct ice_q_vector *q_vector;\t/* Backreference to associated vector */\n+\tu8 __iomem *tail;\n+\tunion {\n+\t\tstruct ice_tx_buf *tx_buf;\n+\t\tstruct ice_rx_buf *rx_buf;\n+\t};\n \tu16 q_index;\t\t\t/* Queue number of ring */\n+\tu32 txq_teid;\t\t\t/* Added Tx queue TEID */\n+\n+\t/* high bit set means dynamic, use accessor routines to read/write.\n+\t * hardware supports 2us/1us resolution for the ITR registers.\n+\t * these values always store the USER setting, and must be converted\n+\t * before programming to a register.\n+\t */\n+\tu16 rx_itr_setting;\n+\tu16 tx_itr_setting;\n+\n \tu16 count;\t\t\t/* Number of descriptors */\n \tu16 reg_idx;\t\t\t/* HW register index of the ring */\n+\n+\t/* used in interrupt processing */\n+\tu16 next_to_use;\n+\tu16 next_to_clean;\n+\n \tbool ring_active;\t\t/* is ring online or not */\n+\tunsigned int size;\t\t/* length of descriptor ring in bytes */\n+\tdma_addr_t dma;\t\t\t/* physical address of ring */\n \tstruct rcu_head rcu;\t\t/* to avoid race on free */\n+\tu16 next_to_alloc;\n } ____cacheline_internodealigned_in_smp;\n \n+enum ice_latency_range {\n+\tICE_LOWEST_LATENCY = 0,\n+\tICE_LOW_LATENCY = 1,\n+\tICE_BULK_LATENCY = 2,\n+\tICE_ULTRA_LATENCY = 3,\n+};\n+\n struct ice_ring_container {\n \t/* array of pointers to rings */\n \tstruct ice_ring *ring;\n \tunsigned int total_bytes;\t/* total bytes processed this int */\n \tunsigned int total_pkts;\t/* total packets processed this int */\n+\tenum ice_latency_range latency_range;\n \tu16 itr;\n };\n \n@@ -66,4 +134,11 @@ struct ice_ring_container {\n #define ice_for_each_ring(pos, head) \\\n \tfor (pos = (head).ring; pos; pos = pos->next)\n \n+bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);\n+void ice_clean_tx_ring(struct ice_ring *tx_ring);\n+void ice_clean_rx_ring(struct ice_ring 
*rx_ring);\n+int ice_setup_tx_ring(struct ice_ring *tx_ring);\n+int ice_setup_rx_ring(struct ice_ring *rx_ring);\n+void ice_free_tx_ring(struct ice_ring *tx_ring);\n+void ice_free_rx_ring(struct ice_ring *rx_ring);\n #endif /* _ICE_TXRX_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 796c3d0a3c47..2f1ad8cb2456 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -22,9 +22,11 @@\n #include \"ice_hw_autogen.h\"\n #include \"ice_osdep.h\"\n #include \"ice_controlq.h\"\n+#include \"ice_lan_tx_rx.h\"\n \n /* debug masks - set these bits in hw->debug_mask to control output */\n #define ICE_DBG_INIT\t\tBIT_ULL(1)\n+#define ICE_DBG_QCTX\t\tBIT_ULL(6)\n #define ICE_DBG_NVM\t\tBIT_ULL(7)\n #define ICE_DBG_LAN\t\tBIT_ULL(8)\n #define ICE_DBG_SW\t\tBIT_ULL(13)\n",
    "prefixes": [
        "v2",
        "09/15"
    ]
}