get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update: the request replaces all writable fields).
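
For example, these endpoints can be driven from Python. The sketch below is a minimal illustration using the third-party requests library against the patch shown in the sample response that follows; the API token is a placeholder, and write access (PUT/PATCH) is only available to users with sufficient rights on the project (e.g. maintainers). The "state" and "archived" fields appear in the response below and are assumed to be writable for such a user.

import requests

url = "http://patchwork.ozlabs.org/api/patches/970207/"

# GET: reads are anonymous and return the JSON document shown below.
patch = requests.get(url).json()
print(patch["name"], "->", patch["state"])

# PATCH: a partial update; only the fields named in the request body
# are changed.
resp = requests.patch(
    url,
    headers={"Authorization": "Token YOUR_API_TOKEN"},  # placeholder token
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

A PUT to the same URL behaves the same way, except that it is a full update and expects the complete set of writable fields rather than a partial one.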

GET /api/patches/970207/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 970207,
    "url": "http://patchwork.ozlabs.org/api/patches/970207/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180915003757.169108-13-jesse.brandeburg@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180915003757.169108-13-jesse.brandeburg@intel.com>",
    "list_archive_url": null,
    "date": "2018-09-15T00:37:55",
    "name": "[net-next,v2,12/14] iavf: rename most of i40e strings",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "378ec9f58385e041cde0084c0d9c6d8856d0e2a5",
    "submitter": {
        "id": 189,
        "url": "http://patchwork.ozlabs.org/api/people/189/?format=api",
        "name": "Jesse Brandeburg",
        "email": "jesse.brandeburg@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180915003757.169108-13-jesse.brandeburg@intel.com/mbox/",
    "series": [
        {
            "id": 65816,
            "url": "http://patchwork.ozlabs.org/api/series/65816/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=65816",
            "date": "2018-09-15T00:37:43",
            "name": "[net-next,v2,01/14] intel-ethernet: rename i40evf to iavf",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/65816/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/970207/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/970207/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.136; helo=silver.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 42C7Dn3r4gz9sCS\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSat, 15 Sep 2018 20:00:41 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id E5B3F22002;\n\tSat, 15 Sep 2018 10:00:39 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id CWdS33-wA5ws; Sat, 15 Sep 2018 10:00:15 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id 8F96F21558;\n\tSat, 15 Sep 2018 10:00:15 +0000 (UTC)",
            "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\tby ash.osuosl.org (Postfix) with ESMTP id ACC581C2E93\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 15 Sep 2018 00:38:13 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id A79E7877A7\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 15 Sep 2018 00:38:13 +0000 (UTC)",
            "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id WjM2Ear1EH9N for <intel-wired-lan@lists.osuosl.org>;\n\tSat, 15 Sep 2018 00:38:02 +0000 (UTC)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby fraxinus.osuosl.org (Postfix) with ESMTPS id B558387859\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 15 Sep 2018 00:38:00 +0000 (UTC)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t14 Sep 2018 17:38:00 -0700",
            "from jfsjbrandeb002.jf.intel.com ([10.166.241.63])\n\tby fmsmga001.fm.intel.com with ESMTP; 14 Sep 2018 17:37:59 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.53,375,1531810800\"; d=\"scan'208\";a=\"90189579\"",
        "From": "Jesse Brandeburg <jesse.brandeburg@intel.com>",
        "To": "netdev@vger.kernel.org,\n\tintel-wired-lan@lists.osuosl.org",
        "Date": "Fri, 14 Sep 2018 17:37:55 -0700",
        "Message-Id": "<20180915003757.169108-13-jesse.brandeburg@intel.com>",
        "X-Mailer": "git-send-email 2.14.4",
        "In-Reply-To": "<20180915003757.169108-1-jesse.brandeburg@intel.com>",
        "References": "<20180915003757.169108-1-jesse.brandeburg@intel.com>",
        "X-Mailman-Approved-At": "Sat, 15 Sep 2018 10:00:15 +0000",
        "Subject": "[Intel-wired-lan] [PATCH net-next v2 12/14] iavf: rename most of\n\ti40e strings",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "This is the big rename patch, it takes most of the i40e_\nand I40E_ strings and renames them to iavf_ and IAVF_.\n\nSome of the adminq code, as well as most of the client\ninterface code used by RDMA is left unchanged in order\nto indicate that the driver is talking to non-internal to\niavf code.\n\nSigned-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>\n---\n drivers/net/ethernet/intel/iavf/i40e_adminq.c     |  96 ++--\n drivers/net/ethernet/intel/iavf/i40e_adminq.h     |  24 +-\n drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h |   4 +-\n drivers/net/ethernet/intel/iavf/i40e_alloc.h      |  38 +-\n drivers/net/ethernet/intel/iavf/i40e_common.c     | 590 ++++++++++----------\n drivers/net/ethernet/intel/iavf/i40e_osdep.h      |  20 +-\n drivers/net/ethernet/intel/iavf/i40e_prototype.h  |  13 +-\n drivers/net/ethernet/intel/iavf/i40e_status.h     |   8 +-\n drivers/net/ethernet/intel/iavf/i40e_type.h       | 633 +++++++++++-----------\n drivers/net/ethernet/intel/iavf/iavf.h            |  59 +-\n drivers/net/ethernet/intel/iavf/iavf_client.c     |  26 +-\n drivers/net/ethernet/intel/iavf/iavf_ethtool.c    | 142 +++--\n drivers/net/ethernet/intel/iavf/iavf_main.c       | 107 ++--\n drivers/net/ethernet/intel/iavf/iavf_trace.h      |  36 +-\n drivers/net/ethernet/intel/iavf/iavf_txrx.c       | 616 ++++++++++-----------\n drivers/net/ethernet/intel/iavf/iavf_txrx.h       | 341 ++++++------\n drivers/net/ethernet/intel/iavf/iavf_virtchnl.c   |  20 +-\n 17 files changed, 1366 insertions(+), 1407 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c\nindex d614722fbb3d..8aa817808cd5 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c\n+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c\n@@ -36,7 +36,7 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)\n {\n \tiavf_status ret_code;\n \n-\tret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,\n+\tret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,\n \t\t\t\t\t i40e_mem_atq_ring,\n \t\t\t\t\t (hw->aq.num_asq_entries *\n \t\t\t\t\t sizeof(struct i40e_aq_desc)),\n@@ -44,11 +44,11 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)\n \tif (ret_code)\n \t\treturn ret_code;\n \n-\tret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,\n+\tret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,\n \t\t\t\t\t  (hw->aq.num_asq_entries *\n \t\t\t\t\t  sizeof(struct i40e_asq_cmd_details)));\n \tif (ret_code) {\n-\t\ti40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);\n+\t\tiavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);\n \t\treturn ret_code;\n \t}\n \n@@ -63,7 +63,7 @@ static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)\n {\n \tiavf_status ret_code;\n \n-\tret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,\n+\tret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,\n \t\t\t\t\t i40e_mem_arq_ring,\n \t\t\t\t\t (hw->aq.num_arq_entries *\n \t\t\t\t\t sizeof(struct i40e_aq_desc)),\n@@ -81,7 +81,7 @@ static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)\n  **/\n static void i40e_free_adminq_asq(struct iavf_hw *hw)\n {\n-\ti40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);\n+\tiavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);\n }\n \n /**\n@@ -93,7 +93,7 @@ static void i40e_free_adminq_asq(struct iavf_hw *hw)\n  **/\n static void i40e_free_adminq_arq(struct iavf_hw *hw)\n {\n-\ti40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);\n+\tiavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);\n }\n \n /**\n@@ -104,7 +104,7 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)\n {\n \tiavf_status ret_code;\n \tstruct i40e_aq_desc *desc;\n-\tstruct i40e_dma_mem *bi;\n+\tstruct iavf_dma_mem *bi;\n \tint i;\n \n \t/* We'll be allocating the buffer info memory first, then we can\n@@ -112,16 +112,16 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)\n \t */\n \n \t/* buffer_info structures do not need alignment */\n-\tret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,\n-\t\t\t\t\t  (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));\n+\tret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,\n+\t\t\t\t\t  (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));\n \tif (ret_code)\n \t\tgoto alloc_arq_bufs;\n-\thw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;\n+\thw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;\n \n \t/* allocate the mapped buffers */\n \tfor (i = 0; i < hw->aq.num_arq_entries; i++) {\n \t\tbi = &hw->aq.arq.r.arq_bi[i];\n-\t\tret_code = i40e_allocate_dma_mem(hw, bi,\n+\t\tret_code = iavf_allocate_dma_mem(hw, bi,\n \t\t\t\t\t\t i40e_mem_arq_buf,\n \t\t\t\t\t\t hw->aq.arq_buf_size,\n \t\t\t\t\t\t IAVF_ADMINQ_DESC_ALIGNMENT);\n@@ -157,8 +157,8 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)\n \t/* don't try to free the one that failed... 
*/\n \ti--;\n \tfor (; i >= 0; i--)\n-\t\ti40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);\n-\ti40e_free_virt_mem(hw, &hw->aq.arq.dma_head);\n+\t\tiavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);\n+\tiavf_free_virt_mem(hw, &hw->aq.arq.dma_head);\n \n \treturn ret_code;\n }\n@@ -170,20 +170,20 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)\n static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)\n {\n \tiavf_status ret_code;\n-\tstruct i40e_dma_mem *bi;\n+\tstruct iavf_dma_mem *bi;\n \tint i;\n \n \t/* No mapped memory needed yet, just the buffer info structures */\n-\tret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,\n-\t\t\t\t\t  (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));\n+\tret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,\n+\t\t\t\t\t  (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));\n \tif (ret_code)\n \t\tgoto alloc_asq_bufs;\n-\thw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;\n+\thw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;\n \n \t/* allocate the mapped buffers */\n \tfor (i = 0; i < hw->aq.num_asq_entries; i++) {\n \t\tbi = &hw->aq.asq.r.asq_bi[i];\n-\t\tret_code = i40e_allocate_dma_mem(hw, bi,\n+\t\tret_code = iavf_allocate_dma_mem(hw, bi,\n \t\t\t\t\t\t i40e_mem_asq_buf,\n \t\t\t\t\t\t hw->aq.asq_buf_size,\n \t\t\t\t\t\t IAVF_ADMINQ_DESC_ALIGNMENT);\n@@ -197,8 +197,8 @@ static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)\n \t/* don't try to free the one that failed... */\n \ti--;\n \tfor (; i >= 0; i--)\n-\t\ti40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);\n-\ti40e_free_virt_mem(hw, &hw->aq.asq.dma_head);\n+\t\tiavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);\n+\tiavf_free_virt_mem(hw, &hw->aq.asq.dma_head);\n \n \treturn ret_code;\n }\n@@ -213,13 +213,13 @@ static void i40e_free_arq_bufs(struct iavf_hw *hw)\n \n \t/* free descriptors */\n \tfor (i = 0; i < hw->aq.num_arq_entries; i++)\n-\t\ti40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);\n+\t\tiavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);\n \n \t/* free the descriptor memory */\n-\ti40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);\n+\tiavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);\n \n \t/* free the dma header */\n-\ti40e_free_virt_mem(hw, &hw->aq.arq.dma_head);\n+\tiavf_free_virt_mem(hw, &hw->aq.arq.dma_head);\n }\n \n /**\n@@ -233,16 +233,16 @@ static void i40e_free_asq_bufs(struct iavf_hw *hw)\n \t/* only unmap if the address is non-NULL */\n \tfor (i = 0; i < hw->aq.num_asq_entries; i++)\n \t\tif (hw->aq.asq.r.asq_bi[i].pa)\n-\t\t\ti40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);\n+\t\t\tiavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);\n \n \t/* free the buffer info list */\n-\ti40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);\n+\tiavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);\n \n \t/* free the descriptor memory */\n-\ti40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);\n+\tiavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);\n \n \t/* free the dma header */\n-\ti40e_free_virt_mem(hw, &hw->aq.asq.dma_head);\n+\tiavf_free_virt_mem(hw, &hw->aq.asq.dma_head);\n }\n \n /**\n@@ -568,7 +568,7 @@ iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)\n  **/\n static u16 i40e_clean_asq(struct iavf_hw *hw)\n {\n-\tstruct i40e_adminq_ring *asq = &hw->aq.asq;\n+\tstruct iavf_adminq_ring *asq = &hw->aq.asq;\n \tstruct i40e_asq_cmd_details *details;\n \tu16 ntc = asq->next_to_clean;\n \tstruct i40e_aq_desc desc_cb;\n@@ -577,7 +577,7 @@ static u16 i40e_clean_asq(struct iavf_hw *hw)\n \tdesc = IAVF_ADMINQ_DESC(*asq, ntc);\n \tdetails = I40E_ADMINQ_DETAILS(*asq, 
ntc);\n \twhile (rd32(hw, hw->aq.asq.head) != ntc) {\n-\t\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\t\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"ntc %d head %d.\\n\", ntc, rd32(hw, hw->aq.asq.head));\n \n \t\tif (details->callback) {\n@@ -598,7 +598,7 @@ static u16 i40e_clean_asq(struct iavf_hw *hw)\n \n \tasq->next_to_clean = ntc;\n \n-\treturn I40E_DESC_UNUSED(asq);\n+\treturn IAVF_DESC_UNUSED(asq);\n }\n \n /**\n@@ -632,7 +632,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \t\t\t\t  u16  buff_size,\n \t\t\t\t  struct i40e_asq_cmd_details *cmd_details)\n {\n-\tstruct i40e_dma_mem *dma_buff = NULL;\n+\tstruct iavf_dma_mem *dma_buff = NULL;\n \tstruct i40e_asq_cmd_details *details;\n \tstruct i40e_aq_desc *desc_on_ring;\n \tbool cmd_completed = false;\n@@ -643,7 +643,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \tmutex_lock(&hw->aq.asq_mutex);\n \n \tif (hw->aq.asq.count == 0) {\n-\t\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\t\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQTX: Admin queue not initialized.\\n\");\n \t\tstatus = I40E_ERR_QUEUE_EMPTY;\n \t\tgoto asq_send_command_error;\n@@ -653,7 +653,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \n \tval = rd32(hw, hw->aq.asq.head);\n \tif (val >= hw->aq.num_asq_entries) {\n-\t\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\t\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQTX: head overrun at %d\\n\", val);\n \t\tstatus = I40E_ERR_QUEUE_EMPTY;\n \t\tgoto asq_send_command_error;\n@@ -683,7 +683,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \n \tif (buff_size > hw->aq.asq_buf_size) {\n \t\tiavf_debug(hw,\n-\t\t\t   I40E_DEBUG_AQ_MESSAGE,\n+\t\t\t   IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQTX: Invalid buffer size: %d.\\n\",\n \t\t\t   buff_size);\n \t\tstatus = I40E_ERR_INVALID_SIZE;\n@@ -692,7 +692,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \n \tif (details->postpone && !details->async) {\n \t\tiavf_debug(hw,\n-\t\t\t   I40E_DEBUG_AQ_MESSAGE,\n+\t\t\t   IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQTX: Async flag not set along with postpone flag\");\n \t\tstatus = I40E_ERR_PARAM;\n \t\tgoto asq_send_command_error;\n@@ -707,7 +707,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \t */\n \tif (i40e_clean_asq(hw) == 0) {\n \t\tiavf_debug(hw,\n-\t\t\t   I40E_DEBUG_AQ_MESSAGE,\n+\t\t\t   IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQTX: Error queue is full.\\n\");\n \t\tstatus = I40E_ERR_ADMIN_QUEUE_FULL;\n \t\tgoto asq_send_command_error;\n@@ -736,8 +736,8 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \t}\n \n \t/* bump the tail */\n-\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, \"AQTX: desc and buffer:\\n\");\n-\tiavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,\n+\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, \"AQTX: desc and buffer:\\n\");\n+\tiavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,\n \t\t      buff, buff_size);\n \t(hw->aq.asq.next_to_use)++;\n \tif (hw->aq.asq.next_to_use == hw->aq.asq.count)\n@@ -770,7 +770,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \t\tretval = le16_to_cpu(desc->retval);\n \t\tif (retval != 0) {\n \t\t\tiavf_debug(hw,\n-\t\t\t\t   I40E_DEBUG_AQ_MESSAGE,\n+\t\t\t\t   IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t\t   \"AQTX: Command completed with error 0x%X.\\n\",\n \t\t\t\t   retval);\n 
\n@@ -787,9 +787,9 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \t\thw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;\n \t}\n \n-\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t   \"AQTX: desc and buffer writeback:\\n\");\n-\tiavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);\n+\tiavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);\n \n \t/* save writeback aq if requested */\n \tif (details->wb_desc)\n@@ -799,11 +799,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,\n \tif ((!cmd_completed) &&\n \t    (!details->async && !details->postpone)) {\n \t\tif (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {\n-\t\t\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\t\t\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t\t   \"AQTX: AQ Critical error.\\n\");\n \t\t\tstatus = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;\n \t\t} else {\n-\t\t\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\t\t\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t\t   \"AQTX: Writeback timeout.\\n\");\n \t\t\tstatus = I40E_ERR_ADMIN_QUEUE_TIMEOUT;\n \t\t}\n@@ -846,7 +846,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,\n \tu16 ntc = hw->aq.arq.next_to_clean;\n \tstruct i40e_aq_desc *desc;\n \tiavf_status ret_code = 0;\n-\tstruct i40e_dma_mem *bi;\n+\tstruct iavf_dma_mem *bi;\n \tu16 desc_idx;\n \tu16 datalen;\n \tu16 flags;\n@@ -859,7 +859,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,\n \tmutex_lock(&hw->aq.arq_mutex);\n \n \tif (hw->aq.arq.count == 0) {\n-\t\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE,\n+\t\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQRX: Admin queue not initialized.\\n\");\n \t\tret_code = I40E_ERR_QUEUE_EMPTY;\n \t\tgoto clean_arq_element_err;\n@@ -883,7 +883,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,\n \tif (flags & I40E_AQ_FLAG_ERR) {\n \t\tret_code = I40E_ERR_ADMIN_QUEUE_ERROR;\n \t\tiavf_debug(hw,\n-\t\t\t   I40E_DEBUG_AQ_MESSAGE,\n+\t\t\t   IAVF_DEBUG_AQ_MESSAGE,\n \t\t\t   \"AQRX: Event received with error 0x%X.\\n\",\n \t\t\t   hw->aq.arq_last_status);\n \t}\n@@ -895,8 +895,8 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,\n \t\tmemcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,\n \t\t       e->msg_len);\n \n-\tiavf_debug(hw, I40E_DEBUG_AQ_MESSAGE, \"AQRX: desc and buffer:\\n\");\n-\tiavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,\n+\tiavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, \"AQRX: desc and buffer:\\n\");\n+\tiavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,\n \t\t      hw->aq.arq_buf_size);\n \n \t/* Restore the original datalen and buffer address in the desc,\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h\nindex fd162a293c38..e34625e25589 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h\n@@ -1,8 +1,8 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/\n \n-#ifndef _I40E_ADMINQ_H_\n-#define _I40E_ADMINQ_H_\n+#ifndef _IAVF_ADMINQ_H_\n+#define _IAVF_ADMINQ_H_\n \n #include \"i40e_osdep.h\"\n #include \"i40e_status.h\"\n@@ -13,14 +13,14 @@\n \n #define IAVF_ADMINQ_DESC_ALIGNMENT 4096\n \n-struct i40e_adminq_ring {\n-\tstruct i40e_virt_mem dma_head;\t/* space for dma structures */\n-\tstruct i40e_dma_mem desc_buf;\t/* descriptor ring memory */\n-\tstruct i40e_virt_mem cmd_buf;\t/* command buffer memory */\n+struct iavf_adminq_ring {\n+\tstruct iavf_virt_mem dma_head;\t/* space for dma structures */\n+\tstruct iavf_dma_mem desc_buf;\t/* descriptor ring memory */\n+\tstruct iavf_virt_mem cmd_buf;\t/* command buffer memory */\n \n \tunion {\n-\t\tstruct i40e_dma_mem *asq_bi;\n-\t\tstruct i40e_dma_mem *arq_bi;\n+\t\tstruct iavf_dma_mem *asq_bi;\n+\t\tstruct iavf_dma_mem *arq_bi;\n \t} r;\n \n \tu16 count;\t\t/* Number of descriptors */\n@@ -61,9 +61,9 @@ struct i40e_arq_event_info {\n };\n \n /* Admin Queue information */\n-struct i40e_adminq_info {\n-\tstruct i40e_adminq_ring arq;    /* receive queue */\n-\tstruct i40e_adminq_ring asq;    /* send queue */\n+struct iavf_adminq_info {\n+\tstruct iavf_adminq_ring arq;    /* receive queue */\n+\tstruct iavf_adminq_ring asq;    /* send queue */\n \tu32 asq_cmd_timeout;            /* send queue cmd write back timeout*/\n \tu16 num_arq_entries;            /* receive queue depth */\n \tu16 num_asq_entries;            /* send queue depth */\n@@ -132,4 +132,4 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)\n \n void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);\n \n-#endif /* _I40E_ADMINQ_H_ */\n+#endif /* _IAVF_ADMINQ_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h\nindex 493bdc5331f7..af4f94a6541e 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h\n@@ -5,7 +5,9 @@\n #define _I40E_ADMINQ_CMD_H_\n \n /* This header file defines the i40e Admin Queue commands and is shared between\n- * i40e Firmware and Software.\n+ * i40e Firmware and Software.  Do not change the names in this file to IAVF\n+ * because this file should be diff-able against the i40e version, even\n+ * though many parts have been removed in this VF version.\n  *\n  * This file needs to comply with the Linux Kernel coding style.\n  */\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_alloc.h b/drivers/net/ethernet/intel/iavf/i40e_alloc.h\nindex e70d805dde42..3757a0a6a0fe 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_alloc.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_alloc.h\n@@ -1,30 +1,30 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/\n \n-#ifndef _I40E_ALLOC_H_\n-#define _I40E_ALLOC_H_\n+#ifndef _IAVF_ALLOC_H_\n+#define _IAVF_ALLOC_H_\n \n struct iavf_hw;\n \n /* Memory allocation types */\n-enum i40e_memory_type {\n-\ti40e_mem_arq_buf = 0,\t\t/* ARQ indirect command buffer */\n-\ti40e_mem_asq_buf = 1,\n-\ti40e_mem_atq_buf = 2,\t\t/* ATQ indirect command buffer */\n-\ti40e_mem_arq_ring = 3,\t\t/* ARQ descriptor ring */\n-\ti40e_mem_atq_ring = 4,\t\t/* ATQ descriptor ring */\n-\ti40e_mem_pd = 5,\t\t/* Page Descriptor */\n-\ti40e_mem_bp = 6,\t\t/* Backing Page - 4KB */\n-\ti40e_mem_bp_jumbo = 7,\t\t/* Backing Page - > 4KB */\n-\ti40e_mem_reserved\n+enum iavf_memory_type {\n+\tiavf_mem_arq_buf = 0,\t\t/* ARQ indirect command buffer */\n+\tiavf_mem_asq_buf = 1,\n+\tiavf_mem_atq_buf = 2,\t\t/* ATQ indirect command buffer */\n+\tiavf_mem_arq_ring = 3,\t\t/* ARQ descriptor ring */\n+\tiavf_mem_atq_ring = 4,\t\t/* ATQ descriptor ring */\n+\tiavf_mem_pd = 5,\t\t/* Page Descriptor */\n+\tiavf_mem_bp = 6,\t\t/* Backing Page - 4KB */\n+\tiavf_mem_bp_jumbo = 7,\t\t/* Backing Page - > 4KB */\n+\tiavf_mem_reserved\n };\n \n /* prototype for functions used for dynamic memory allocation */\n-iavf_status i40e_allocate_dma_mem(struct iavf_hw *hw, struct i40e_dma_mem *mem,\n-\t\t\t\t  enum i40e_memory_type type, u64 size,\n+iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,\n+\t\t\t\t  enum iavf_memory_type type, u64 size,\n \t\t\t\t  u32 alignment);\n-iavf_status i40e_free_dma_mem(struct iavf_hw *hw, struct i40e_dma_mem *mem);\n-iavf_status i40e_allocate_virt_mem(struct iavf_hw *hw,\n-\t\t\t\t   struct i40e_virt_mem *mem, u32 size);\n-iavf_status i40e_free_virt_mem(struct iavf_hw *hw, struct i40e_virt_mem *mem);\n-#endif /* _I40E_ALLOC_H_ */\n+iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem);\n+iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,\n+\t\t\t\t   struct iavf_virt_mem *mem, u32 size);\n+iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem);\n+#endif /* _IAVF_ALLOC_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_common.c b/drivers/net/ethernet/intel/iavf/i40e_common.c\nindex c830bb6f5943..d9fd2f24b3e7 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_common.c\n+++ b/drivers/net/ethernet/intel/iavf/i40e_common.c\n@@ -7,28 +7,28 @@\n #include <linux/avf/virtchnl.h>\n \n /**\n- * i40e_set_mac_type - Sets MAC type\n+ * iavf_set_mac_type - Sets MAC type\n  * @hw: pointer to the HW structure\n  *\n  * This function sets the mac type of the adapter based on the\n  * vendor ID and device ID stored in the hw structure.\n  **/\n-iavf_status i40e_set_mac_type(struct iavf_hw *hw)\n+iavf_status iavf_set_mac_type(struct iavf_hw *hw)\n {\n \tiavf_status status = 0;\n \n \tif (hw->vendor_id == PCI_VENDOR_ID_INTEL) {\n \t\tswitch (hw->device_id) {\n \t\tcase IAVF_DEV_ID_X722_VF:\n-\t\t\thw->mac.type = I40E_MAC_X722_VF;\n+\t\t\thw->mac.type = IAVF_MAC_X722_VF;\n \t\t\tbreak;\n \t\tcase IAVF_DEV_ID_VF:\n \t\tcase IAVF_DEV_ID_VF_HV:\n \t\tcase IAVF_DEV_ID_ADAPTIVE_VF:\n-\t\t\thw->mac.type = I40E_MAC_VF;\n+\t\t\thw->mac.type = IAVF_MAC_VF;\n \t\t\tbreak;\n \t\tdefault:\n-\t\t\thw->mac.type = I40E_MAC_GENERIC;\n+\t\t\thw->mac.type = IAVF_MAC_GENERIC;\n \t\t\tbreak;\n \t\t}\n \t} else {\n@@ -344,7 +344,7 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)\n }\n \n /**\n- * i40e_aq_get_set_rss_lut\n+ * iavf_aq_get_set_rss_lut\n  * @hw: pointer to the hardware structure\n  * @vsi_id: vsi fw index\n  * @pf_lut: for PF table set true, for 
VSI table set false\n@@ -354,7 +354,7 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)\n  *\n  * Internal function to get or set RSS look up table\n  **/\n-static iavf_status i40e_aq_get_set_rss_lut(struct iavf_hw *hw,\n+static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,\n \t\t\t\t\t   u16 vsi_id, bool pf_lut,\n \t\t\t\t\t   u8 *lut, u16 lut_size,\n \t\t\t\t\t   bool set)\n@@ -410,7 +410,7 @@ static iavf_status i40e_aq_get_set_rss_lut(struct iavf_hw *hw,\n iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,\n \t\t\t\tbool pf_lut, u8 *lut, u16 lut_size)\n {\n-\treturn i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,\n+\treturn iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,\n \t\t\t\t       false);\n }\n \n@@ -427,11 +427,11 @@ iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,\n iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,\n \t\t\t\tbool pf_lut, u8 *lut, u16 lut_size)\n {\n-\treturn i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);\n+\treturn iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);\n }\n \n /**\n- * i40e_aq_get_set_rss_key\n+ * iavf_aq_get_set_rss_key\n  * @hw: pointer to the hw struct\n  * @vsi_id: vsi fw index\n  * @key: pointer to key info struct\n@@ -440,7 +440,7 @@ iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,\n  * get the RSS key per VSI\n  **/\n static\n-iavf_status i40e_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,\n+iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,\n \t\t\t\t    struct i40e_aqc_get_set_rss_key_data *key,\n \t\t\t\t    bool set)\n {\n@@ -482,7 +482,7 @@ iavf_status i40e_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,\n iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,\n \t\t\t\tstruct i40e_aqc_get_set_rss_key_data *key)\n {\n-\treturn i40e_aq_get_set_rss_key(hw, vsi_id, key, false);\n+\treturn iavf_aq_get_set_rss_key(hw, vsi_id, key, false);\n }\n \n /**\n@@ -496,7 +496,7 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,\n iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,\n \t\t\t\tstruct i40e_aqc_get_set_rss_key_data *key)\n {\n-\treturn i40e_aq_get_set_rss_key(hw, vsi_id, key, true);\n+\treturn iavf_aq_get_set_rss_key(hw, vsi_id, key, true);\n }\n \n /* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the\n@@ -518,350 +518,350 @@ iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,\n  * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP\n  *      Use the rest of the fields to look at the tunnels, inner protocols, etc\n  * ELSE\n- *      Use the enum i40e_rx_l2_ptype to decode the packet type\n+ *      Use the enum iavf_rx_l2_ptype to decode the packet type\n  * ENDIF\n  */\n \n /* macro to make the table lines short */\n-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\\\n+#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\\\n \t{\tPTYPE, \\\n \t\t1, \\\n-\t\tI40E_RX_PTYPE_OUTER_##OUTER_IP, \\\n-\t\tI40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \\\n-\t\tI40E_RX_PTYPE_##OUTER_FRAG, \\\n-\t\tI40E_RX_PTYPE_TUNNEL_##T, \\\n-\t\tI40E_RX_PTYPE_TUNNEL_END_##TE, \\\n-\t\tI40E_RX_PTYPE_##TEF, \\\n-\t\tI40E_RX_PTYPE_INNER_PROT_##I, \\\n-\t\tI40E_RX_PTYPE_PAYLOAD_LAYER_##PL }\n-\n-#define I40E_PTT_UNUSED_ENTRY(PTYPE) \\\n+\t\tIAVF_RX_PTYPE_OUTER_##OUTER_IP, \\\n+\t\tIAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \\\n+\t\tIAVF_RX_PTYPE_##OUTER_FRAG, 
\\\n+\t\tIAVF_RX_PTYPE_TUNNEL_##T, \\\n+\t\tIAVF_RX_PTYPE_TUNNEL_END_##TE, \\\n+\t\tIAVF_RX_PTYPE_##TEF, \\\n+\t\tIAVF_RX_PTYPE_INNER_PROT_##I, \\\n+\t\tIAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }\n+\n+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \\\n \t\t{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }\n \n /* shorter macros makes the table fit but are terse */\n-#define I40E_RX_PTYPE_NOF\t\tI40E_RX_PTYPE_NOT_FRAG\n-#define I40E_RX_PTYPE_FRG\t\tI40E_RX_PTYPE_FRAG\n-#define I40E_RX_PTYPE_INNER_PROT_TS\tI40E_RX_PTYPE_INNER_PROT_TIMESYNC\n+#define IAVF_RX_PTYPE_NOF\t\tIAVF_RX_PTYPE_NOT_FRAG\n+#define IAVF_RX_PTYPE_FRG\t\tIAVF_RX_PTYPE_FRAG\n+#define IAVF_RX_PTYPE_INNER_PROT_TS\tIAVF_RX_PTYPE_INNER_PROT_TIMESYNC\n \n /* Lookup table mapping the HW PTYPE to the bit field for decoding */\n-struct i40e_rx_ptype_decoded iavf_ptype_lookup[] = {\n+struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {\n \t/* L2 Packet types */\n-\tI40E_PTT_UNUSED_ENTRY(0),\n-\tI40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n-\tI40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),\n-\tI40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n-\tI40E_PTT_UNUSED_ENTRY(4),\n-\tI40E_PTT_UNUSED_ENTRY(5),\n-\tI40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n-\tI40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n-\tI40E_PTT_UNUSED_ENTRY(8),\n-\tI40E_PTT_UNUSED_ENTRY(9),\n-\tI40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n-\tI40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),\n-\tI40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT_UNUSED_ENTRY(0),\n+\tIAVF_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n+\tIAVF_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),\n+\tIAVF_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n+\tIAVF_PTT_UNUSED_ENTRY(4),\n+\tIAVF_PTT_UNUSED_ENTRY(5),\n+\tIAVF_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n+\tIAVF_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n+\tIAVF_PTT_UNUSED_ENTRY(8),\n+\tIAVF_PTT_UNUSED_ENTRY(9),\n+\tIAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),\n+\tIAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),\n+\tIAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),\n \n \t/* Non Tunneled IPv4 */\n-\tI40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  
PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(25),\n-\tI40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),\n-\tI40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),\n-\tI40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),\n+\tIAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(25),\n+\tIAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),\n+\tIAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),\n+\tIAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> IPv4 */\n-\tI40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(32),\n-\tI40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(32),\n+\tIAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> IPv6 */\n-\tI40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(39),\n-\tI40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(39),\n+\tIAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> GRE/NAT */\n-\tI40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),\n \n \t/* IPv4 --> GRE/NAT --> IPv4 */\n-\tI40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(47),\n-\tI40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(47),\n+\tIAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> GRE/NAT --> IPv6 */\n-\tI40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, 
PAY3),\n-\tI40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(54),\n-\tI40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(54),\n+\tIAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> GRE/NAT --> MAC */\n-\tI40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),\n \n \t/* IPv4 --> GRE/NAT --> MAC --> IPv4 */\n-\tI40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(62),\n-\tI40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(62),\n+\tIAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> GRE/NAT -> MAC --> IPv6 */\n-\tI40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(69),\n-\tI40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(69),\n+\tIAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),\n \n \t/* IPv4 --> GRE/NAT --> MAC/VLAN */\n-\tI40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),\n \n \t/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */\n-\tI40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(77),\n-\tI40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(77),\n+\tIAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */\n-\tI40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(84),\n-\tI40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(84),\n+\tIAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),\n \n \t/* Non Tunneled IPv6 */\n-\tI40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),\n-\tI40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),\n-\tI40E_PTT_UNUSED_ENTRY(91),\n-\tI40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),\n-\tI40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),\n-\tI40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),\n+\tIAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),\n+\tIAVF_PTT_UNUSED_ENTRY(91),\n+\tIAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),\n+\tIAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),\n+\tIAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> IPv4 */\n-\tI40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(98),\n-\tI40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(98),\n+\tIAVF_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> IPv6 */\n-\tI40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(105),\n-\tI40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  
PAY4),\n-\tI40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(105),\n+\tIAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> GRE/NAT */\n-\tI40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),\n \n \t/* IPv6 --> GRE/NAT -> IPv4 */\n-\tI40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(113),\n-\tI40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(113),\n+\tIAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> GRE/NAT -> IPv6 */\n-\tI40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(120),\n-\tI40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(120),\n+\tIAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> GRE/NAT -> MAC */\n-\tI40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),\n \n \t/* IPv6 --> GRE/NAT -> MAC -> IPv4 */\n-\tI40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(128),\n-\tI40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(128),\n+\tIAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, 
 PAY4),\n+\tIAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> GRE/NAT -> MAC -> IPv6 */\n-\tI40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(135),\n-\tI40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(135),\n+\tIAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> GRE/NAT -> MAC/VLAN */\n-\tI40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),\n+\tIAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),\n \n \t/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */\n-\tI40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),\n-\tI40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),\n-\tI40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(143),\n-\tI40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),\n-\tI40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),\n-\tI40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),\n+\tIAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),\n+\tIAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),\n+\tIAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(143),\n+\tIAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),\n+\tIAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),\n+\tIAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),\n \n \t/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */\n-\tI40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),\n-\tI40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),\n-\tI40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),\n-\tI40E_PTT_UNUSED_ENTRY(150),\n-\tI40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),\n-\tI40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),\n-\tI40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),\n+\tIAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),\n+\tIAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),\n+\tIAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),\n+\tIAVF_PTT_UNUSED_ENTRY(150),\n+\tIAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),\n+\tIAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),\n+\tIAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),\n \n \t/* unused entries 
*/\n-\tI40E_PTT_UNUSED_ENTRY(154),\n-\tI40E_PTT_UNUSED_ENTRY(155),\n-\tI40E_PTT_UNUSED_ENTRY(156),\n-\tI40E_PTT_UNUSED_ENTRY(157),\n-\tI40E_PTT_UNUSED_ENTRY(158),\n-\tI40E_PTT_UNUSED_ENTRY(159),\n-\n-\tI40E_PTT_UNUSED_ENTRY(160),\n-\tI40E_PTT_UNUSED_ENTRY(161),\n-\tI40E_PTT_UNUSED_ENTRY(162),\n-\tI40E_PTT_UNUSED_ENTRY(163),\n-\tI40E_PTT_UNUSED_ENTRY(164),\n-\tI40E_PTT_UNUSED_ENTRY(165),\n-\tI40E_PTT_UNUSED_ENTRY(166),\n-\tI40E_PTT_UNUSED_ENTRY(167),\n-\tI40E_PTT_UNUSED_ENTRY(168),\n-\tI40E_PTT_UNUSED_ENTRY(169),\n-\n-\tI40E_PTT_UNUSED_ENTRY(170),\n-\tI40E_PTT_UNUSED_ENTRY(171),\n-\tI40E_PTT_UNUSED_ENTRY(172),\n-\tI40E_PTT_UNUSED_ENTRY(173),\n-\tI40E_PTT_UNUSED_ENTRY(174),\n-\tI40E_PTT_UNUSED_ENTRY(175),\n-\tI40E_PTT_UNUSED_ENTRY(176),\n-\tI40E_PTT_UNUSED_ENTRY(177),\n-\tI40E_PTT_UNUSED_ENTRY(178),\n-\tI40E_PTT_UNUSED_ENTRY(179),\n-\n-\tI40E_PTT_UNUSED_ENTRY(180),\n-\tI40E_PTT_UNUSED_ENTRY(181),\n-\tI40E_PTT_UNUSED_ENTRY(182),\n-\tI40E_PTT_UNUSED_ENTRY(183),\n-\tI40E_PTT_UNUSED_ENTRY(184),\n-\tI40E_PTT_UNUSED_ENTRY(185),\n-\tI40E_PTT_UNUSED_ENTRY(186),\n-\tI40E_PTT_UNUSED_ENTRY(187),\n-\tI40E_PTT_UNUSED_ENTRY(188),\n-\tI40E_PTT_UNUSED_ENTRY(189),\n-\n-\tI40E_PTT_UNUSED_ENTRY(190),\n-\tI40E_PTT_UNUSED_ENTRY(191),\n-\tI40E_PTT_UNUSED_ENTRY(192),\n-\tI40E_PTT_UNUSED_ENTRY(193),\n-\tI40E_PTT_UNUSED_ENTRY(194),\n-\tI40E_PTT_UNUSED_ENTRY(195),\n-\tI40E_PTT_UNUSED_ENTRY(196),\n-\tI40E_PTT_UNUSED_ENTRY(197),\n-\tI40E_PTT_UNUSED_ENTRY(198),\n-\tI40E_PTT_UNUSED_ENTRY(199),\n-\n-\tI40E_PTT_UNUSED_ENTRY(200),\n-\tI40E_PTT_UNUSED_ENTRY(201),\n-\tI40E_PTT_UNUSED_ENTRY(202),\n-\tI40E_PTT_UNUSED_ENTRY(203),\n-\tI40E_PTT_UNUSED_ENTRY(204),\n-\tI40E_PTT_UNUSED_ENTRY(205),\n-\tI40E_PTT_UNUSED_ENTRY(206),\n-\tI40E_PTT_UNUSED_ENTRY(207),\n-\tI40E_PTT_UNUSED_ENTRY(208),\n-\tI40E_PTT_UNUSED_ENTRY(209),\n-\n-\tI40E_PTT_UNUSED_ENTRY(210),\n-\tI40E_PTT_UNUSED_ENTRY(211),\n-\tI40E_PTT_UNUSED_ENTRY(212),\n-\tI40E_PTT_UNUSED_ENTRY(213),\n-\tI40E_PTT_UNUSED_ENTRY(214),\n-\tI40E_PTT_UNUSED_ENTRY(215),\n-\tI40E_PTT_UNUSED_ENTRY(216),\n-\tI40E_PTT_UNUSED_ENTRY(217),\n-\tI40E_PTT_UNUSED_ENTRY(218),\n-\tI40E_PTT_UNUSED_ENTRY(219),\n-\n-\tI40E_PTT_UNUSED_ENTRY(220),\n-\tI40E_PTT_UNUSED_ENTRY(221),\n-\tI40E_PTT_UNUSED_ENTRY(222),\n-\tI40E_PTT_UNUSED_ENTRY(223),\n-\tI40E_PTT_UNUSED_ENTRY(224),\n-\tI40E_PTT_UNUSED_ENTRY(225),\n-\tI40E_PTT_UNUSED_ENTRY(226),\n-\tI40E_PTT_UNUSED_ENTRY(227),\n-\tI40E_PTT_UNUSED_ENTRY(228),\n-\tI40E_PTT_UNUSED_ENTRY(229),\n-\n-\tI40E_PTT_UNUSED_ENTRY(230),\n-\tI40E_PTT_UNUSED_ENTRY(231),\n-\tI40E_PTT_UNUSED_ENTRY(232),\n-\tI40E_PTT_UNUSED_ENTRY(233),\n-\tI40E_PTT_UNUSED_ENTRY(234),\n-\tI40E_PTT_UNUSED_ENTRY(235),\n-\tI40E_PTT_UNUSED_ENTRY(236),\n-\tI40E_PTT_UNUSED_ENTRY(237),\n-\tI40E_PTT_UNUSED_ENTRY(238),\n-\tI40E_PTT_UNUSED_ENTRY(239),\n-\n-\tI40E_PTT_UNUSED_ENTRY(240),\n-\tI40E_PTT_UNUSED_ENTRY(241),\n-\tI40E_PTT_UNUSED_ENTRY(242),\n-\tI40E_PTT_UNUSED_ENTRY(243),\n-\tI40E_PTT_UNUSED_ENTRY(244),\n-\tI40E_PTT_UNUSED_ENTRY(245),\n-\tI40E_PTT_UNUSED_ENTRY(246),\n-\tI40E_PTT_UNUSED_ENTRY(247),\n-\tI40E_PTT_UNUSED_ENTRY(248),\n-\tI40E_PTT_UNUSED_ENTRY(249),\n-\n-\tI40E_PTT_UNUSED_ENTRY(250),\n-\tI40E_PTT_UNUSED_ENTRY(251),\n-\tI40E_PTT_UNUSED_ENTRY(252),\n-\tI40E_PTT_UNUSED_ENTRY(253),\n-\tI40E_PTT_UNUSED_ENTRY(254),\n-\tI40E_PTT_UNUSED_ENTRY(255)\n+\tIAVF_PTT_UNUSED_ENTRY(154),\n+\tIAVF_PTT_UNUSED_ENTRY(155),\n+\tIAVF_PTT_UNUSED_ENTRY(156),\n+\tIAVF_PTT_UNUSED_ENTRY(157),\n+\tIAVF_PTT_UNUSED_ENTRY(158),\n+\tIAVF_PTT_UNUSED_ENTRY(159),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(160),\n+\tIAVF_PTT_UNUSED_ENTRY(161),\n
+\tIAVF_PTT_UNUSED_ENTRY(162),\n+\tIAVF_PTT_UNUSED_ENTRY(163),\n+\tIAVF_PTT_UNUSED_ENTRY(164),\n+\tIAVF_PTT_UNUSED_ENTRY(165),\n+\tIAVF_PTT_UNUSED_ENTRY(166),\n+\tIAVF_PTT_UNUSED_ENTRY(167),\n+\tIAVF_PTT_UNUSED_ENTRY(168),\n+\tIAVF_PTT_UNUSED_ENTRY(169),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(170),\n+\tIAVF_PTT_UNUSED_ENTRY(171),\n+\tIAVF_PTT_UNUSED_ENTRY(172),\n+\tIAVF_PTT_UNUSED_ENTRY(173),\n+\tIAVF_PTT_UNUSED_ENTRY(174),\n+\tIAVF_PTT_UNUSED_ENTRY(175),\n+\tIAVF_PTT_UNUSED_ENTRY(176),\n+\tIAVF_PTT_UNUSED_ENTRY(177),\n+\tIAVF_PTT_UNUSED_ENTRY(178),\n+\tIAVF_PTT_UNUSED_ENTRY(179),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(180),\n+\tIAVF_PTT_UNUSED_ENTRY(181),\n+\tIAVF_PTT_UNUSED_ENTRY(182),\n+\tIAVF_PTT_UNUSED_ENTRY(183),\n+\tIAVF_PTT_UNUSED_ENTRY(184),\n+\tIAVF_PTT_UNUSED_ENTRY(185),\n+\tIAVF_PTT_UNUSED_ENTRY(186),\n+\tIAVF_PTT_UNUSED_ENTRY(187),\n+\tIAVF_PTT_UNUSED_ENTRY(188),\n+\tIAVF_PTT_UNUSED_ENTRY(189),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(190),\n+\tIAVF_PTT_UNUSED_ENTRY(191),\n+\tIAVF_PTT_UNUSED_ENTRY(192),\n+\tIAVF_PTT_UNUSED_ENTRY(193),\n+\tIAVF_PTT_UNUSED_ENTRY(194),\n+\tIAVF_PTT_UNUSED_ENTRY(195),\n+\tIAVF_PTT_UNUSED_ENTRY(196),\n+\tIAVF_PTT_UNUSED_ENTRY(197),\n+\tIAVF_PTT_UNUSED_ENTRY(198),\n+\tIAVF_PTT_UNUSED_ENTRY(199),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(200),\n+\tIAVF_PTT_UNUSED_ENTRY(201),\n+\tIAVF_PTT_UNUSED_ENTRY(202),\n+\tIAVF_PTT_UNUSED_ENTRY(203),\n+\tIAVF_PTT_UNUSED_ENTRY(204),\n+\tIAVF_PTT_UNUSED_ENTRY(205),\n+\tIAVF_PTT_UNUSED_ENTRY(206),\n+\tIAVF_PTT_UNUSED_ENTRY(207),\n+\tIAVF_PTT_UNUSED_ENTRY(208),\n+\tIAVF_PTT_UNUSED_ENTRY(209),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(210),\n+\tIAVF_PTT_UNUSED_ENTRY(211),\n+\tIAVF_PTT_UNUSED_ENTRY(212),\n+\tIAVF_PTT_UNUSED_ENTRY(213),\n+\tIAVF_PTT_UNUSED_ENTRY(214),\n+\tIAVF_PTT_UNUSED_ENTRY(215),\n+\tIAVF_PTT_UNUSED_ENTRY(216),\n+\tIAVF_PTT_UNUSED_ENTRY(217),\n+\tIAVF_PTT_UNUSED_ENTRY(218),\n+\tIAVF_PTT_UNUSED_ENTRY(219),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(220),\n+\tIAVF_PTT_UNUSED_ENTRY(221),\n+\tIAVF_PTT_UNUSED_ENTRY(222),\n+\tIAVF_PTT_UNUSED_ENTRY(223),\n+\tIAVF_PTT_UNUSED_ENTRY(224),\n+\tIAVF_PTT_UNUSED_ENTRY(225),\n+\tIAVF_PTT_UNUSED_ENTRY(226),\n+\tIAVF_PTT_UNUSED_ENTRY(227),\n+\tIAVF_PTT_UNUSED_ENTRY(228),\n+\tIAVF_PTT_UNUSED_ENTRY(229),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(230),\n+\tIAVF_PTT_UNUSED_ENTRY(231),\n+\tIAVF_PTT_UNUSED_ENTRY(232),\n+\tIAVF_PTT_UNUSED_ENTRY(233),\n+\tIAVF_PTT_UNUSED_ENTRY(234),\n+\tIAVF_PTT_UNUSED_ENTRY(235),\n+\tIAVF_PTT_UNUSED_ENTRY(236),\n+\tIAVF_PTT_UNUSED_ENTRY(237),\n+\tIAVF_PTT_UNUSED_ENTRY(238),\n+\tIAVF_PTT_UNUSED_ENTRY(239),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(240),\n+\tIAVF_PTT_UNUSED_ENTRY(241),\n+\tIAVF_PTT_UNUSED_ENTRY(242),\n+\tIAVF_PTT_UNUSED_ENTRY(243),\n+\tIAVF_PTT_UNUSED_ENTRY(244),\n+\tIAVF_PTT_UNUSED_ENTRY(245),\n+\tIAVF_PTT_UNUSED_ENTRY(246),\n+\tIAVF_PTT_UNUSED_ENTRY(247),\n+\tIAVF_PTT_UNUSED_ENTRY(248),\n+\tIAVF_PTT_UNUSED_ENTRY(249),\n+\n+\tIAVF_PTT_UNUSED_ENTRY(250),\n+\tIAVF_PTT_UNUSED_ENTRY(251),\n+\tIAVF_PTT_UNUSED_ENTRY(252),\n+\tIAVF_PTT_UNUSED_ENTRY(253),\n+\tIAVF_PTT_UNUSED_ENTRY(254),\n+\tIAVF_PTT_UNUSED_ENTRY(255)\n };\n \n /**\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/i40e_osdep.h\nindex 412d534a4bbe..e6e0b0328706 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_osdep.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_osdep.h\n@@ -1,8 +1,8 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/\n \n-#ifndef _I40E_OSDEP_H_\n-#define _I40E_OSDEP_H_\n+#ifndef _IAVF_OSDEP_H_\n+#define _IAVF_OSDEP_H_\n \n #include <linux/types.h>\n #include <linux/if_ether.h>\n@@ -27,26 +27,26 @@\n #define iavf_flush(a)\t\treadl((a)->hw_addr + IAVF_VFGEN_RSTAT)\n \n /* memory allocation tracking */\n-struct i40e_dma_mem {\n+struct iavf_dma_mem {\n \tvoid *va;\n \tdma_addr_t pa;\n \tu32 size;\n };\n \n-#define i40e_allocate_dma_mem(h, m, unused, s, a) \\\n+#define iavf_allocate_dma_mem(h, m, unused, s, a) \\\n \tiavf_allocate_dma_mem_d(h, m, s, a)\n-#define i40e_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)\n+#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)\n \n-struct i40e_virt_mem {\n+struct iavf_virt_mem {\n \tvoid *va;\n \tu32 size;\n };\n-#define i40e_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)\n-#define i40e_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)\n+#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)\n+#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)\n \n #define iavf_debug(h, m, s, ...)  iavf_debug_d(h, m, s, ##__VA_ARGS__)\n extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)\n \t__attribute__ ((format(gnu_printf, 3, 4)));\n \n-typedef enum i40e_status_code iavf_status;\n-#endif /* _I40E_OSDEP_H_ */\n+typedef enum iavf_status_code iavf_status;\n+#endif /* _IAVF_OSDEP_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_prototype.h b/drivers/net/ethernet/intel/iavf/i40e_prototype.h\nindex 5d59104b97b9..dca62e3b951f 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_prototype.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_prototype.h\n@@ -1,8 +1,8 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. */\n \n-#ifndef _I40E_PROTOTYPE_H_\n-#define _I40E_PROTOTYPE_H_\n+#ifndef _IAVF_PROTOTYPE_H_\n+#define _IAVF_PROTOTYPE_H_\n \n #include \"i40e_type.h\"\n #include \"i40e_alloc.h\"\n@@ -48,16 +48,15 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,\n iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,\n \t\t\t\tstruct i40e_aqc_get_set_rss_key_data *key);\n \n-iavf_status i40e_set_mac_type(struct iavf_hw *hw);\n+iavf_status iavf_set_mac_type(struct iavf_hw *hw);\n \n-extern struct i40e_rx_ptype_decoded iavf_ptype_lookup[];\n+extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];\n \n-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)\n+static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)\n {\n \treturn iavf_ptype_lookup[ptype];\n }\n \n-/* i40e_common for VF drivers*/\n void iavf_vf_parse_hw_config(struct iavf_hw *hw,\n \t\t\t     struct virtchnl_vf_resource *msg);\n iavf_status iavf_vf_reset(struct iavf_hw *hw);\n@@ -65,4 +64,4 @@ iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,\n \t\t\t\t   enum virtchnl_ops v_opcode,\n \t\t\t\t   iavf_status v_retval, u8 *msg, u16 msglen,\n \t\t\t\t   struct i40e_asq_cmd_details *cmd_details);\n-#endif /* _I40E_PROTOTYPE_H_ */\n+#endif /* _IAVF_PROTOTYPE_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_status.h b/drivers/net/ethernet/intel/iavf/i40e_status.h\nindex 77be0702d07c..46742fab7b8c 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_status.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_status.h\n@@ -1,11 +1,11 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/\n \n-#ifndef _I40E_STATUS_H_\n-#define _I40E_STATUS_H_\n+#ifndef _IAVF_STATUS_H_\n+#define _IAVF_STATUS_H_\n \n /* Error Codes */\n-enum i40e_status_code {\n+enum iavf_status_code {\n \tI40E_SUCCESS\t\t\t\t= 0,\n \tI40E_ERR_NVM\t\t\t\t= -1,\n \tI40E_ERR_NVM_CHECKSUM\t\t\t= -2,\n@@ -75,4 +75,4 @@ enum i40e_status_code {\n \tI40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR\t= -66,\n };\n \n-#endif /* _I40E_STATUS_H_ */\n+#endif /* _IAVF_STATUS_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/i40e_type.h b/drivers/net/ethernet/intel/iavf/i40e_type.h\nindex 773df7afb9ac..6a917dbfb981 100644\n--- a/drivers/net/ethernet/intel/iavf/i40e_type.h\n+++ b/drivers/net/ethernet/intel/iavf/i40e_type.h\n@@ -1,8 +1,8 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. */\n \n-#ifndef _I40E_TYPE_H_\n-#define _I40E_TYPE_H_\n+#ifndef _IAVF_TYPE_H_\n+#define _IAVF_TYPE_H_\n \n #include \"i40e_status.h\"\n #include \"i40e_osdep.h\"\n@@ -10,14 +10,14 @@\n #include \"i40e_adminq.h\"\n #include \"i40e_devids.h\"\n \n-#define I40E_RXQ_CTX_DBUFF_SHIFT 7\n+#define IAVF_RXQ_CTX_DBUFF_SHIFT 7\n \n-/* I40E_MASK is a macro used on 32 bit registers */\n+/* IAVF_MASK is a macro used on 32 bit registers */\n #define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))\n \n-#define I40E_MAX_VSI_QP\t\t\t16\n-#define I40E_MAX_VF_VSI\t\t\t3\n-#define I40E_MAX_CHAINED_RX_BUFFERS\t5\n+#define IAVF_MAX_VSI_QP\t\t\t16\n+#define IAVF_MAX_VF_VSI\t\t\t3\n+#define IAVF_MAX_CHAINED_RX_BUFFERS\t5\n \n /* forward declaration */\n struct iavf_hw;\n@@ -25,40 +25,40 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *);\n \n /* Data type manipulation macros. */\n \n-#define I40E_DESC_UNUSED(R)\t\\\n+#define IAVF_DESC_UNUSED(R)\t\\\n \t((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \\\n \t(R)->next_to_clean - (R)->next_to_use - 1)\n \n /* bitfields for Tx queue mapping in QTX_CTL */\n-#define I40E_QTX_CTL_VF_QUEUE\t0x0\n-#define I40E_QTX_CTL_VM_QUEUE\t0x1\n-#define I40E_QTX_CTL_PF_QUEUE\t0x2\n+#define IAVF_QTX_CTL_VF_QUEUE\t0x0\n+#define IAVF_QTX_CTL_VM_QUEUE\t0x1\n+#define IAVF_QTX_CTL_PF_QUEUE\t0x2\n \n /* debug masks - set these bits in hw->debug_mask to control output */\n enum iavf_debug_mask {\n-\tI40E_DEBUG_INIT\t\t\t= 0x00000001,\n-\tI40E_DEBUG_RELEASE\t\t= 0x00000002,\n+\tIAVF_DEBUG_INIT\t\t\t= 0x00000001,\n+\tIAVF_DEBUG_RELEASE\t\t= 0x00000002,\n \n-\tI40E_DEBUG_LINK\t\t\t= 0x00000010,\n-\tI40E_DEBUG_PHY\t\t\t= 0x00000020,\n-\tI40E_DEBUG_HMC\t\t\t= 0x00000040,\n-\tI40E_DEBUG_NVM\t\t\t= 0x00000080,\n-\tI40E_DEBUG_LAN\t\t\t= 0x00000100,\n-\tI40E_DEBUG_FLOW\t\t\t= 0x00000200,\n-\tI40E_DEBUG_DCB\t\t\t= 0x00000400,\n-\tI40E_DEBUG_DIAG\t\t\t= 0x00000800,\n-\tI40E_DEBUG_FD\t\t\t= 0x00001000,\n-\tI40E_DEBUG_PACKAGE\t\t= 0x00002000,\n+\tIAVF_DEBUG_LINK\t\t\t= 0x00000010,\n+\tIAVF_DEBUG_PHY\t\t\t= 0x00000020,\n+\tIAVF_DEBUG_HMC\t\t\t= 0x00000040,\n+\tIAVF_DEBUG_NVM\t\t\t= 0x00000080,\n+\tIAVF_DEBUG_LAN\t\t\t= 0x00000100,\n+\tIAVF_DEBUG_FLOW\t\t\t= 0x00000200,\n+\tIAVF_DEBUG_DCB\t\t\t= 0x00000400,\n+\tIAVF_DEBUG_DIAG\t\t\t= 0x00000800,\n+\tIAVF_DEBUG_FD\t\t\t= 0x00001000,\n+\tIAVF_DEBUG_PACKAGE\t\t= 0x00002000,\n \n-\tI40E_DEBUG_AQ_MESSAGE\t\t= 0x01000000,\n-\tI40E_DEBUG_AQ_DESCRIPTOR\t= 0x02000000,\n-\tI40E_DEBUG_AQ_DESC_BUFFER\t= 0x04000000,\n-\tI40E_DEBUG_AQ_COMMAND\t\t= 0x06000000,\n-\tI40E_DEBUG_AQ\t\t\t= 0x0F000000,\n+\tIAVF_DEBUG_AQ_MESSAGE\t\t= 0x01000000,\n+\tIAVF_DEBUG_AQ_DESCRIPTOR\t= 0x02000000,\n+\tIAVF_DEBUG_AQ_DESC_BUFFER\t= 0x04000000,\n+\tIAVF_DEBUG_AQ_COMMAND\t\t= 0x06000000,\n+\tIAVF_DEBUG_AQ\t\t\t= 0x0F000000,\n \n-\tI40E_DEBUG_USER\t\t\t= 0xF0000000,\n+\tIAVF_DEBUG_USER\t\t\t= 0xF0000000,\n \n-\tI40E_DEBUG_ALL\t\t\t= 0xFFFFFFFF\n+\tIAVF_DEBUG_ALL\t\t\t= 0xFFFFFFFF\n };\n \n /* These are structs for managing the hardware information and the operations.\n@@ -69,35 +69,35 @@ enum iavf_debug_mask {\n  * the Firmware and AdminQ are intended to insulate the driver from most of the\n  * future changes, but these structures will also do part of the job.\n  */\n-enum i40e_mac_type {\n-\tI40E_MAC_UNKNOWN = 0,\n-\tI40E_MAC_XL710,\n-\tI40E_MAC_VF,\n-\tI40E_MAC_X722,\n-\tI40E_MAC_X722_VF,\n-\tI40E_MAC_GENERIC,\n-};\n-\n-enum i40e_vsi_type {\n-\tI40E_VSI_MAIN\t= 0,\n-\tI40E_VSI_VMDQ1\t= 1,\n-\tI40E_VSI_VMDQ2\t= 2,\n-\tI40E_VSI_CTRL\t= 3,\n-\tI40E_VSI_FCOE\t= 4,\n-\tI40E_VSI_MIRROR\t= 5,\n-\tI40E_VSI_SRIOV\t= 6,\n-\tI40E_VSI_FDIR\t= 7,\n-\tI40E_VSI_TYPE_UNKNOWN\n-};\n-\n-enum i40e_queue_type {\n-\tI40E_QUEUE_TYPE_RX = 0,\n-\tI40E_QUEUE_TYPE_TX,\n-\tI40E_QUEUE_TYPE_PE_CEQ,\n-\tI40E_QUEUE_TYPE_UNKNOWN\n-};\n-\n-#define I40E_HW_CAP_MAX_GPIO\t\t30\n+enum iavf_mac_type {\n+\tIAVF_MAC_UNKNOWN = 0,\n+\tIAVF_MAC_XL710,\n+\tIAVF_MAC_VF,\n+\tIAVF_MAC_X722,\n+\tIAVF_MAC_X722_VF,\n+\tIAVF_MAC_GENERIC,\n+};\n+\n+enum iavf_vsi_type {\n+\tIAVF_VSI_MAIN\t= 0,\n+\tIAVF_VSI_VMDQ1\t= 1,\n+\tIAVF_VSI_VMDQ2\t= 2,\n+\tIAVF_VSI_CTRL\t= 3,\n+\tIAVF_VSI_FCOE\t= 4,\n+\tIAVF_VSI_MIRROR\t= 5,\n+\tIAVF_VSI_SRIOV\t= 6,\n+\tIAVF_VSI_FDIR\t= 7,\n+\tIAVF_VSI_TYPE_UNKNOWN\n+};\n+\n+enum iavf_queue_type {\n+\tIAVF_QUEUE_TYPE_RX = 0,\n+\tIAVF_QUEUE_TYPE_TX,\n+\tIAVF_QUEUE_TYPE_PE_CEQ,\n+\tIAVF_QUEUE_TYPE_UNKNOWN\n+};\n+\n+#define IAVF_HW_CAP_MAX_GPIO\t\t30\n /* Capabilities of a PF or a VF or the whole device */\n struct iavf_hw_capabilities {\n \tbool dcb;\n@@ -109,8 +109,8 @@ 
struct iavf_hw_capabilities {\n \tu32 num_msix_vectors_vf;\n };\n \n-struct i40e_mac_info {\n-\tenum i40e_mac_type type;\n+struct iavf_mac_info {\n+\tenum iavf_mac_type type;\n \tu8 addr[ETH_ALEN];\n \tu8 perm_addr[ETH_ALEN];\n \tu8 san_addr[ETH_ALEN];\n@@ -118,45 +118,45 @@ struct i40e_mac_info {\n };\n \n /* PCI bus types */\n-enum i40e_bus_type {\n-\ti40e_bus_type_unknown = 0,\n-\ti40e_bus_type_pci,\n-\ti40e_bus_type_pcix,\n-\ti40e_bus_type_pci_express,\n-\ti40e_bus_type_reserved\n+enum iavf_bus_type {\n+\tiavf_bus_type_unknown = 0,\n+\tiavf_bus_type_pci,\n+\tiavf_bus_type_pcix,\n+\tiavf_bus_type_pci_express,\n+\tiavf_bus_type_reserved\n };\n \n /* PCI bus speeds */\n-enum i40e_bus_speed {\n-\ti40e_bus_speed_unknown\t= 0,\n-\ti40e_bus_speed_33\t= 33,\n-\ti40e_bus_speed_66\t= 66,\n-\ti40e_bus_speed_100\t= 100,\n-\ti40e_bus_speed_120\t= 120,\n-\ti40e_bus_speed_133\t= 133,\n-\ti40e_bus_speed_2500\t= 2500,\n-\ti40e_bus_speed_5000\t= 5000,\n-\ti40e_bus_speed_8000\t= 8000,\n-\ti40e_bus_speed_reserved\n+enum iavf_bus_speed {\n+\tiavf_bus_speed_unknown\t= 0,\n+\tiavf_bus_speed_33\t= 33,\n+\tiavf_bus_speed_66\t= 66,\n+\tiavf_bus_speed_100\t= 100,\n+\tiavf_bus_speed_120\t= 120,\n+\tiavf_bus_speed_133\t= 133,\n+\tiavf_bus_speed_2500\t= 2500,\n+\tiavf_bus_speed_5000\t= 5000,\n+\tiavf_bus_speed_8000\t= 8000,\n+\tiavf_bus_speed_reserved\n };\n \n /* PCI bus widths */\n-enum i40e_bus_width {\n-\ti40e_bus_width_unknown\t= 0,\n-\ti40e_bus_width_pcie_x1\t= 1,\n-\ti40e_bus_width_pcie_x2\t= 2,\n-\ti40e_bus_width_pcie_x4\t= 4,\n-\ti40e_bus_width_pcie_x8\t= 8,\n-\ti40e_bus_width_32\t= 32,\n-\ti40e_bus_width_64\t= 64,\n-\ti40e_bus_width_reserved\n+enum iavf_bus_width {\n+\tiavf_bus_width_unknown\t= 0,\n+\tiavf_bus_width_pcie_x1\t= 1,\n+\tiavf_bus_width_pcie_x2\t= 2,\n+\tiavf_bus_width_pcie_x4\t= 4,\n+\tiavf_bus_width_pcie_x8\t= 8,\n+\tiavf_bus_width_32\t= 32,\n+\tiavf_bus_width_64\t= 64,\n+\tiavf_bus_width_reserved\n };\n \n /* Bus parameters */\n-struct i40e_bus_info {\n-\tenum i40e_bus_speed speed;\n-\tenum i40e_bus_width width;\n-\tenum i40e_bus_type type;\n+struct iavf_bus_info {\n+\tenum iavf_bus_speed speed;\n+\tenum iavf_bus_width width;\n+\tenum iavf_bus_type type;\n \n \tu16 func;\n \tu16 device;\n@@ -164,16 +164,15 @@ struct i40e_bus_info {\n \tu16 bus_id;\n };\n \n-#define I40E_MAX_TRAFFIC_CLASS\t\t8\n-#define I40E_MAX_USER_PRIORITY\t\t8\n+#define IAVF_MAX_USER_PRIORITY\t\t8\n /* Port hardware description */\n struct iavf_hw {\n \tu8 __iomem *hw_addr;\n \tvoid *back;\n \n \t/* subsystem structs */\n-\tstruct i40e_mac_info mac;\n-\tstruct i40e_bus_info bus;\n+\tstruct iavf_mac_info mac;\n+\tstruct iavf_bus_info bus;\n \n \t/* pci info */\n \tu16 device_id;\n@@ -186,14 +185,14 @@ struct iavf_hw {\n \tstruct iavf_hw_capabilities dev_caps;\n \n \t/* Admin Queue info */\n-\tstruct i40e_adminq_info aq;\n+\tstruct iavf_adminq_info aq;\n \n \t/* debug mask */\n \tu32 debug_mask;\n \tchar err_str[16];\n };\n \n-struct i40e_driver_version {\n+struct iavf_driver_version {\n \tu8 major_version;\n \tu8 minor_version;\n \tu8 build_version;\n@@ -202,7 +201,7 @@ struct i40e_driver_version {\n };\n \n /* RX Descriptors */\n-union i40e_16byte_rx_desc {\n+union iavf_16byte_rx_desc {\n \tstruct {\n \t\t__le64 pkt_addr; /* Packet buffer address */\n \t\t__le64 hdr_addr; /* Header buffer address */\n@@ -229,7 +228,7 @@ union i40e_16byte_rx_desc {\n \t} wb;  /* writeback */\n };\n \n-union i40e_32byte_rx_desc {\n+union iavf_32byte_rx_desc {\n \tstruct {\n \t\t__le64  pkt_addr; /* Packet buffer address */\n \t\t__le64  
hdr_addr; /* Header buffer address */\n@@ -278,7 +277,7 @@ union i40e_32byte_rx_desc {\n \t} wb;  /* writeback */\n };\n \n-enum i40e_rx_desc_status_bits {\n+enum iavf_rx_desc_status_bits {\n \t/* Note: These are predefined bit offsets */\n \tIAVF_RX_DESC_STATUS_DD_SHIFT\t\t= 0,\n \tIAVF_RX_DESC_STATUS_EOF_SHIFT\t\t= 1,\n@@ -302,29 +301,29 @@ enum i40e_rx_desc_status_bits {\n \tIAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */\n };\n \n-#define I40E_RXD_QW1_STATUS_SHIFT\t0\n-#define I40E_RXD_QW1_STATUS_MASK\t((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \\\n-\t\t\t\t\t << I40E_RXD_QW1_STATUS_SHIFT)\n+#define IAVF_RXD_QW1_STATUS_SHIFT\t0\n+#define IAVF_RXD_QW1_STATUS_MASK\t((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \\\n+\t\t\t\t\t << IAVF_RXD_QW1_STATUS_SHIFT)\n \n-#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT\n-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK\t(0x3UL << \\\n-\t\t\t\t\t     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)\n+#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT   IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT\n+#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK\t(0x3UL << \\\n+\t\t\t\t\t     IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)\n \n-#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT\n-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \\\n-\t\t\t\t    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)\n+#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT  IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT\n+#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK \\\n+\t\t\t\t    BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)\n \n-enum i40e_rx_desc_fltstat_values {\n+enum iavf_rx_desc_fltstat_values {\n \tIAVF_RX_DESC_FLTSTAT_NO_DATA\t= 0,\n \tIAVF_RX_DESC_FLTSTAT_RSV_FD_ID\t= 1, /* 16byte desc? FD_ID : RSV */\n \tIAVF_RX_DESC_FLTSTAT_RSV\t= 2,\n \tIAVF_RX_DESC_FLTSTAT_RSS_HASH\t= 3,\n };\n \n-#define I40E_RXD_QW1_ERROR_SHIFT\t19\n-#define I40E_RXD_QW1_ERROR_MASK\t\t(0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)\n+#define IAVF_RXD_QW1_ERROR_SHIFT\t19\n+#define IAVF_RXD_QW1_ERROR_MASK\t\t(0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)\n \n-enum i40e_rx_desc_error_bits {\n+enum iavf_rx_desc_error_bits {\n \t/* Note: These are predefined bit offsets */\n \tIAVF_RX_DESC_ERROR_RXE_SHIFT\t\t= 0,\n \tIAVF_RX_DESC_ERROR_RECIPE_SHIFT\t\t= 1,\n@@ -337,7 +336,7 @@ enum i40e_rx_desc_error_bits {\n \tIAVF_RX_DESC_ERROR_PPRS_SHIFT\t\t= 7\n };\n \n-enum i40e_rx_desc_error_l3l4e_fcoe_masks {\n+enum iavf_rx_desc_error_l3l4e_fcoe_masks {\n \tIAVF_RX_DESC_ERROR_L3L4E_NONE\t\t= 0,\n \tIAVF_RX_DESC_ERROR_L3L4E_PROT\t\t= 1,\n \tIAVF_RX_DESC_ERROR_L3L4E_FC\t\t= 2,\n@@ -345,40 +344,40 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {\n \tIAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN\t= 4\n };\n \n-#define I40E_RXD_QW1_PTYPE_SHIFT\t30\n-#define I40E_RXD_QW1_PTYPE_MASK\t\t(0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)\n+#define IAVF_RXD_QW1_PTYPE_SHIFT\t30\n+#define IAVF_RXD_QW1_PTYPE_MASK\t\t(0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)\n \n /* Packet type non-ip values */\n-enum i40e_rx_l2_ptype {\n-\tI40E_RX_PTYPE_L2_RESERVED\t\t\t= 0,\n-\tI40E_RX_PTYPE_L2_MAC_PAY2\t\t\t= 1,\n-\tI40E_RX_PTYPE_L2_TIMESYNC_PAY2\t\t\t= 2,\n-\tI40E_RX_PTYPE_L2_FIP_PAY2\t\t\t= 3,\n-\tI40E_RX_PTYPE_L2_OUI_PAY2\t\t\t= 4,\n-\tI40E_RX_PTYPE_L2_MACCNTRL_PAY2\t\t\t= 5,\n-\tI40E_RX_PTYPE_L2_LLDP_PAY2\t\t\t= 6,\n-\tI40E_RX_PTYPE_L2_ECP_PAY2\t\t\t= 7,\n-\tI40E_RX_PTYPE_L2_EVB_PAY2\t\t\t= 8,\n-\tI40E_RX_PTYPE_L2_QCN_PAY2\t\t\t= 9,\n-\tI40E_RX_PTYPE_L2_EAPOL_PAY2\t\t\t= 10,\n-\tI40E_RX_PTYPE_L2_ARP\t\t\t\t= 11,\n-\tI40E_RX_PTYPE_L2_FCOE_PAY3\t\t\t= 12,\n-\tI40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3\t\t= 
13,\n-\tI40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3\t\t= 14,\n-\tI40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3\t\t= 15,\n-\tI40E_RX_PTYPE_L2_FCOE_FCOTHER_PA\t\t= 16,\n-\tI40E_RX_PTYPE_L2_FCOE_VFT_PAY3\t\t\t= 17,\n-\tI40E_RX_PTYPE_L2_FCOE_VFT_FCDATA\t\t= 18,\n-\tI40E_RX_PTYPE_L2_FCOE_VFT_FCRDY\t\t\t= 19,\n-\tI40E_RX_PTYPE_L2_FCOE_VFT_FCRSP\t\t\t= 20,\n-\tI40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER\t\t= 21,\n-\tI40E_RX_PTYPE_GRENAT4_MAC_PAY3\t\t\t= 58,\n-\tI40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4\t= 87,\n-\tI40E_RX_PTYPE_GRENAT6_MAC_PAY3\t\t\t= 124,\n-\tI40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4\t= 153\n-};\n-\n-struct i40e_rx_ptype_decoded {\n+enum iavf_rx_l2_ptype {\n+\tIAVF_RX_PTYPE_L2_RESERVED\t\t\t= 0,\n+\tIAVF_RX_PTYPE_L2_MAC_PAY2\t\t\t= 1,\n+\tIAVF_RX_PTYPE_L2_TIMESYNC_PAY2\t\t\t= 2,\n+\tIAVF_RX_PTYPE_L2_FIP_PAY2\t\t\t= 3,\n+\tIAVF_RX_PTYPE_L2_OUI_PAY2\t\t\t= 4,\n+\tIAVF_RX_PTYPE_L2_MACCNTRL_PAY2\t\t\t= 5,\n+\tIAVF_RX_PTYPE_L2_LLDP_PAY2\t\t\t= 6,\n+\tIAVF_RX_PTYPE_L2_ECP_PAY2\t\t\t= 7,\n+\tIAVF_RX_PTYPE_L2_EVB_PAY2\t\t\t= 8,\n+\tIAVF_RX_PTYPE_L2_QCN_PAY2\t\t\t= 9,\n+\tIAVF_RX_PTYPE_L2_EAPOL_PAY2\t\t\t= 10,\n+\tIAVF_RX_PTYPE_L2_ARP\t\t\t\t= 11,\n+\tIAVF_RX_PTYPE_L2_FCOE_PAY3\t\t\t= 12,\n+\tIAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3\t\t= 13,\n+\tIAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3\t\t= 14,\n+\tIAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3\t\t= 15,\n+\tIAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA\t\t= 16,\n+\tIAVF_RX_PTYPE_L2_FCOE_VFT_PAY3\t\t\t= 17,\n+\tIAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA\t\t= 18,\n+\tIAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY\t\t\t= 19,\n+\tIAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP\t\t\t= 20,\n+\tIAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER\t\t= 21,\n+\tIAVF_RX_PTYPE_GRENAT4_MAC_PAY3\t\t\t= 58,\n+\tIAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4\t= 87,\n+\tIAVF_RX_PTYPE_GRENAT6_MAC_PAY3\t\t\t= 124,\n+\tIAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4\t= 153\n+};\n+\n+struct iavf_rx_ptype_decoded {\n \tu32 ptype:8;\n \tu32 known:1;\n \tu32 outer_ip:1;\n@@ -391,64 +390,64 @@ struct i40e_rx_ptype_decoded {\n \tu32 payload_layer:3;\n };\n \n-enum i40e_rx_ptype_outer_ip {\n-\tI40E_RX_PTYPE_OUTER_L2\t= 0,\n-\tI40E_RX_PTYPE_OUTER_IP\t= 1\n+enum iavf_rx_ptype_outer_ip {\n+\tIAVF_RX_PTYPE_OUTER_L2\t= 0,\n+\tIAVF_RX_PTYPE_OUTER_IP\t= 1\n };\n \n-enum i40e_rx_ptype_outer_ip_ver {\n-\tI40E_RX_PTYPE_OUTER_NONE\t= 0,\n-\tI40E_RX_PTYPE_OUTER_IPV4\t= 0,\n-\tI40E_RX_PTYPE_OUTER_IPV6\t= 1\n+enum iavf_rx_ptype_outer_ip_ver {\n+\tIAVF_RX_PTYPE_OUTER_NONE\t= 0,\n+\tIAVF_RX_PTYPE_OUTER_IPV4\t= 0,\n+\tIAVF_RX_PTYPE_OUTER_IPV6\t= 1\n };\n \n-enum i40e_rx_ptype_outer_fragmented {\n-\tI40E_RX_PTYPE_NOT_FRAG\t= 0,\n-\tI40E_RX_PTYPE_FRAG\t= 1\n+enum iavf_rx_ptype_outer_fragmented {\n+\tIAVF_RX_PTYPE_NOT_FRAG\t= 0,\n+\tIAVF_RX_PTYPE_FRAG\t= 1\n };\n \n-enum i40e_rx_ptype_tunnel_type {\n-\tI40E_RX_PTYPE_TUNNEL_NONE\t\t= 0,\n-\tI40E_RX_PTYPE_TUNNEL_IP_IP\t\t= 1,\n-\tI40E_RX_PTYPE_TUNNEL_IP_GRENAT\t\t= 2,\n-\tI40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC\t= 3,\n-\tI40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN\t= 4,\n+enum iavf_rx_ptype_tunnel_type {\n+\tIAVF_RX_PTYPE_TUNNEL_NONE\t\t= 0,\n+\tIAVF_RX_PTYPE_TUNNEL_IP_IP\t\t= 1,\n+\tIAVF_RX_PTYPE_TUNNEL_IP_GRENAT\t\t= 2,\n+\tIAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC\t= 3,\n+\tIAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN\t= 4,\n };\n \n-enum i40e_rx_ptype_tunnel_end_prot {\n-\tI40E_RX_PTYPE_TUNNEL_END_NONE\t= 0,\n-\tI40E_RX_PTYPE_TUNNEL_END_IPV4\t= 1,\n-\tI40E_RX_PTYPE_TUNNEL_END_IPV6\t= 2,\n+enum iavf_rx_ptype_tunnel_end_prot {\n+\tIAVF_RX_PTYPE_TUNNEL_END_NONE\t= 0,\n+\tIAVF_RX_PTYPE_TUNNEL_END_IPV4\t= 1,\n+\tIAVF_RX_PTYPE_TUNNEL_END_IPV6\t= 2,\n };\n \n-enum i40e_rx_ptype_inner_prot 
{\n-\tI40E_RX_PTYPE_INNER_PROT_NONE\t\t= 0,\n-\tI40E_RX_PTYPE_INNER_PROT_UDP\t\t= 1,\n-\tI40E_RX_PTYPE_INNER_PROT_TCP\t\t= 2,\n-\tI40E_RX_PTYPE_INNER_PROT_SCTP\t\t= 3,\n-\tI40E_RX_PTYPE_INNER_PROT_ICMP\t\t= 4,\n-\tI40E_RX_PTYPE_INNER_PROT_TIMESYNC\t= 5\n+enum iavf_rx_ptype_inner_prot {\n+\tIAVF_RX_PTYPE_INNER_PROT_NONE\t\t= 0,\n+\tIAVF_RX_PTYPE_INNER_PROT_UDP\t\t= 1,\n+\tIAVF_RX_PTYPE_INNER_PROT_TCP\t\t= 2,\n+\tIAVF_RX_PTYPE_INNER_PROT_SCTP\t\t= 3,\n+\tIAVF_RX_PTYPE_INNER_PROT_ICMP\t\t= 4,\n+\tIAVF_RX_PTYPE_INNER_PROT_TIMESYNC\t= 5\n };\n \n-enum i40e_rx_ptype_payload_layer {\n-\tI40E_RX_PTYPE_PAYLOAD_LAYER_NONE\t= 0,\n-\tI40E_RX_PTYPE_PAYLOAD_LAYER_PAY2\t= 1,\n-\tI40E_RX_PTYPE_PAYLOAD_LAYER_PAY3\t= 2,\n-\tI40E_RX_PTYPE_PAYLOAD_LAYER_PAY4\t= 3,\n+enum iavf_rx_ptype_payload_layer {\n+\tIAVF_RX_PTYPE_PAYLOAD_LAYER_NONE\t= 0,\n+\tIAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2\t= 1,\n+\tIAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3\t= 2,\n+\tIAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4\t= 3,\n };\n \n-#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT\t38\n-#define I40E_RXD_QW1_LENGTH_PBUF_MASK\t(0x3FFFULL << \\\n-\t\t\t\t\t I40E_RXD_QW1_LENGTH_PBUF_SHIFT)\n+#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT\t38\n+#define IAVF_RXD_QW1_LENGTH_PBUF_MASK\t(0x3FFFULL << \\\n+\t\t\t\t\t IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)\n \n-#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT\t52\n-#define I40E_RXD_QW1_LENGTH_HBUF_MASK\t(0x7FFULL << \\\n-\t\t\t\t\t I40E_RXD_QW1_LENGTH_HBUF_SHIFT)\n+#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT\t52\n+#define IAVF_RXD_QW1_LENGTH_HBUF_MASK\t(0x7FFULL << \\\n+\t\t\t\t\t IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)\n \n-#define I40E_RXD_QW1_LENGTH_SPH_SHIFT\t63\n-#define I40E_RXD_QW1_LENGTH_SPH_MASK\tBIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)\n+#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT\t63\n+#define IAVF_RXD_QW1_LENGTH_SPH_MASK\tBIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT)\n \n-enum i40e_rx_desc_ext_status_bits {\n+enum iavf_rx_desc_ext_status_bits {\n \t/* Note: These are predefined bit offsets */\n \tIAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT\t= 0,\n \tIAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT\t= 1,\n@@ -459,7 +458,7 @@ enum i40e_rx_desc_ext_status_bits {\n \tIAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT\t= 11,\n };\n \n-enum i40e_rx_desc_pe_status_bits {\n+enum iavf_rx_desc_pe_status_bits {\n \t/* Note: These are predefined bit offsets */\n \tIAVF_RX_DESC_PE_STATUS_QPID_SHIFT\t= 0, /* 18 BITS */\n \tIAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT\t= 0, /* 16 BITS */\n@@ -472,47 +471,47 @@ enum i40e_rx_desc_pe_status_bits {\n \tIAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT\t= 29\n };\n \n-#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT\t\t38\n-#define I40E_RX_PROG_STATUS_DESC_LENGTH\t\t\t0x2000000\n+#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT\t\t38\n+#define IAVF_RX_PROG_STATUS_DESC_LENGTH\t\t\t0x2000000\n \n-#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT\t2\n-#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK\t(0x7UL << \\\n-\t\t\t\tI40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)\n+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT\t2\n+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK\t(0x7UL << \\\n+\t\t\t\tIAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)\n \n-#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT\t19\n-#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK\t\t(0x3FUL << \\\n-\t\t\t\tI40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)\n+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT\t19\n+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK\t\t(0x3FUL << \\\n+\t\t\t\tIAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)\n \n-enum i40e_rx_prog_status_desc_status_bits {\n+enum iavf_rx_prog_status_desc_status_bits {\n \t/* Note: These are predefined 
bit offsets */\n-\tI40E_RX_PROG_STATUS_DESC_DD_SHIFT\t= 0,\n-\tI40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT\t= 2 /* 3 BITS */\n+\tIAVF_RX_PROG_STATUS_DESC_DD_SHIFT\t= 0,\n+\tIAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT\t= 2 /* 3 BITS */\n };\n \n-enum i40e_rx_prog_status_desc_prog_id_masks {\n-\tI40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS\t= 1,\n-\tI40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS\t= 2,\n-\tI40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS\t= 4,\n+enum iavf_rx_prog_status_desc_prog_id_masks {\n+\tIAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS\t= 1,\n+\tIAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS\t= 2,\n+\tIAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS\t= 4,\n };\n \n-enum i40e_rx_prog_status_desc_error_bits {\n+enum iavf_rx_prog_status_desc_error_bits {\n \t/* Note: These are predefined bit offsets */\n-\tI40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT\t= 0,\n-\tI40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT\t= 1,\n-\tI40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT\t= 2,\n-\tI40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT\t= 3\n+\tIAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT\t= 0,\n+\tIAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT\t= 1,\n+\tIAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT\t= 2,\n+\tIAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT\t= 3\n };\n \n /* TX Descriptor */\n-struct i40e_tx_desc {\n+struct iavf_tx_desc {\n \t__le64 buffer_addr; /* Address of descriptor's data buf */\n \t__le64 cmd_type_offset_bsz;\n };\n \n-#define I40E_TXD_QW1_DTYPE_SHIFT\t0\n-#define I40E_TXD_QW1_DTYPE_MASK\t\t(0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)\n+#define IAVF_TXD_QW1_DTYPE_SHIFT\t0\n+#define IAVF_TXD_QW1_DTYPE_MASK\t\t(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)\n \n-enum i40e_tx_desc_dtype_value {\n+enum iavf_tx_desc_dtype_value {\n \tIAVF_TX_DESC_DTYPE_DATA\t\t= 0x0,\n \tIAVF_TX_DESC_DTYPE_NOP\t\t= 0x1, /* same as Context desc */\n \tIAVF_TX_DESC_DTYPE_CONTEXT\t= 0x1,\n@@ -525,10 +524,10 @@ enum i40e_tx_desc_dtype_value {\n \tIAVF_TX_DESC_DTYPE_DESC_DONE\t= 0xF\n };\n \n-#define I40E_TXD_QW1_CMD_SHIFT\t4\n-#define I40E_TXD_QW1_CMD_MASK\t(0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)\n+#define IAVF_TXD_QW1_CMD_SHIFT\t4\n+#define IAVF_TXD_QW1_CMD_MASK\t(0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)\n \n-enum i40e_tx_desc_cmd_bits {\n+enum iavf_tx_desc_cmd_bits {\n \tIAVF_TX_DESC_CMD_EOP\t\t\t= 0x0001,\n \tIAVF_TX_DESC_CMD_RS\t\t\t= 0x0002,\n \tIAVF_TX_DESC_CMD_ICRC\t\t\t= 0x0004,\n@@ -549,154 +548,130 @@ enum i40e_tx_desc_cmd_bits {\n \tIAVF_TX_DESC_CMD_L4T_EOFT_EOF_A\t\t= 0x0300, /* 2 BITS */\n };\n \n-#define I40E_TXD_QW1_OFFSET_SHIFT\t16\n-#define I40E_TXD_QW1_OFFSET_MASK\t(0x3FFFFULL << \\\n-\t\t\t\t\t I40E_TXD_QW1_OFFSET_SHIFT)\n+#define IAVF_TXD_QW1_OFFSET_SHIFT\t16\n+#define IAVF_TXD_QW1_OFFSET_MASK\t(0x3FFFFULL << \\\n+\t\t\t\t\t IAVF_TXD_QW1_OFFSET_SHIFT)\n \n-enum i40e_tx_desc_length_fields {\n+enum iavf_tx_desc_length_fields {\n \t/* Note: These are predefined bit offsets */\n \tIAVF_TX_DESC_LENGTH_MACLEN_SHIFT\t= 0, /* 7 BITS */\n \tIAVF_TX_DESC_LENGTH_IPLEN_SHIFT\t\t= 7, /* 7 BITS */\n \tIAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT\t= 14 /* 4 BITS */\n };\n \n-#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT\t34\n-#define I40E_TXD_QW1_TX_BUF_SZ_MASK\t(0x3FFFULL << \\\n-\t\t\t\t\t I40E_TXD_QW1_TX_BUF_SZ_SHIFT)\n+#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT\t34\n+#define IAVF_TXD_QW1_TX_BUF_SZ_MASK\t(0x3FFFULL << \\\n+\t\t\t\t\t IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)\n \n-#define I40E_TXD_QW1_L2TAG1_SHIFT\t48\n-#define I40E_TXD_QW1_L2TAG1_MASK\t(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)\n+#define IAVF_TXD_QW1_L2TAG1_SHIFT\t48\n+#define IAVF_TXD_QW1_L2TAG1_MASK\t(0xFFFFULL << 
IAVF_TXD_QW1_L2TAG1_SHIFT)\n \n /* Context descriptors */\n-struct i40e_tx_context_desc {\n+struct iavf_tx_context_desc {\n \t__le32 tunneling_params;\n \t__le16 l2tag2;\n \t__le16 rsvd;\n \t__le64 type_cmd_tso_mss;\n };\n \n-#define I40E_TXD_CTX_QW1_CMD_SHIFT\t4\n-#define I40E_TXD_CTX_QW1_CMD_MASK\t(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)\n+#define IAVF_TXD_CTX_QW1_CMD_SHIFT\t4\n+#define IAVF_TXD_CTX_QW1_CMD_MASK\t(0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)\n \n-enum i40e_tx_ctx_desc_cmd_bits {\n-\tI40E_TX_CTX_DESC_TSO\t\t= 0x01,\n-\tI40E_TX_CTX_DESC_TSYN\t\t= 0x02,\n-\tI40E_TX_CTX_DESC_IL2TAG2\t= 0x04,\n-\tI40E_TX_CTX_DESC_IL2TAG2_IL2H\t= 0x08,\n-\tI40E_TX_CTX_DESC_SWTCH_NOTAG\t= 0x00,\n-\tI40E_TX_CTX_DESC_SWTCH_UPLINK\t= 0x10,\n-\tI40E_TX_CTX_DESC_SWTCH_LOCAL\t= 0x20,\n-\tI40E_TX_CTX_DESC_SWTCH_VSI\t= 0x30,\n-\tI40E_TX_CTX_DESC_SWPE\t\t= 0x40\n+enum iavf_tx_ctx_desc_cmd_bits {\n+\tIAVF_TX_CTX_DESC_TSO\t\t= 0x01,\n+\tIAVF_TX_CTX_DESC_TSYN\t\t= 0x02,\n+\tIAVF_TX_CTX_DESC_IL2TAG2\t= 0x04,\n+\tIAVF_TX_CTX_DESC_IL2TAG2_IL2H\t= 0x08,\n+\tIAVF_TX_CTX_DESC_SWTCH_NOTAG\t= 0x00,\n+\tIAVF_TX_CTX_DESC_SWTCH_UPLINK\t= 0x10,\n+\tIAVF_TX_CTX_DESC_SWTCH_LOCAL\t= 0x20,\n+\tIAVF_TX_CTX_DESC_SWTCH_VSI\t= 0x30,\n+\tIAVF_TX_CTX_DESC_SWPE\t\t= 0x40\n };\n \n-#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT\t30\n-#define I40E_TXD_CTX_QW1_TSO_LEN_MASK\t(0x3FFFFULL << \\\n-\t\t\t\t\t I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)\n-\n-#define I40E_TXD_CTX_QW1_MSS_SHIFT\t50\n-#define I40E_TXD_CTX_QW1_MSS_MASK\t(0x3FFFULL << \\\n-\t\t\t\t\t I40E_TXD_CTX_QW1_MSS_SHIFT)\n-\n-#define I40E_TXD_CTX_QW1_VSI_SHIFT\t50\n-#define I40E_TXD_CTX_QW1_VSI_MASK\t(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)\n-\n-#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT\t0\n-#define I40E_TXD_CTX_QW0_EXT_IP_MASK\t(0x3ULL << \\\n-\t\t\t\t\t I40E_TXD_CTX_QW0_EXT_IP_SHIFT)\n-\n-enum i40e_tx_ctx_desc_eipt_offload {\n-\tI40E_TX_CTX_EXT_IP_NONE\t\t= 0x0,\n-\tI40E_TX_CTX_EXT_IP_IPV6\t\t= 0x1,\n-\tI40E_TX_CTX_EXT_IP_IPV4_NO_CSUM\t= 0x2,\n-\tI40E_TX_CTX_EXT_IP_IPV4\t\t= 0x3\n-};\n-\n-#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT\t2\n-#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK\t(0x3FULL << \\\n-\t\t\t\t\t I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)\n-\n-#define I40E_TXD_CTX_QW0_NATT_SHIFT\t9\n-#define I40E_TXD_CTX_QW0_NATT_MASK\t(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)\n-\n-#define I40E_TXD_CTX_UDP_TUNNELING\tBIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)\n-#define I40E_TXD_CTX_GRE_TUNNELING\t(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)\n-\n-#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT\t11\n-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \\\n-\t\t\t\t       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)\n-\n-#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST\tI40E_TXD_CTX_QW0_EIP_NOINC_MASK\n-\n-#define I40E_TXD_CTX_QW0_NATLEN_SHIFT\t12\n-#define I40E_TXD_CTX_QW0_NATLEN_MASK\t(0X7FULL << \\\n-\t\t\t\t\t I40E_TXD_CTX_QW0_NATLEN_SHIFT)\n-\n-#define I40E_TXD_CTX_QW0_DECTTL_SHIFT\t19\n-#define I40E_TXD_CTX_QW0_DECTTL_MASK\t(0xFULL << \\\n-\t\t\t\t\t I40E_TXD_CTX_QW0_DECTTL_SHIFT)\n-\n-#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT\t23\n-#define I40E_TXD_CTX_QW0_L4T_CS_MASK\tBIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)\n-\n /* Packet Classifier Types for filters */\n-enum i40e_filter_pctype {\n+enum iavf_filter_pctype {\n \t/* Note: Values 0-28 are reserved for future use.\n \t * Value 29, 30, 32 are not supported on XL710 and X710.\n \t */\n-\tI40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP\t= 29,\n-\tI40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP\t= 30,\n-\tI40E_FILTER_PCTYPE_NONF_IPV4_UDP\t\t= 31,\n-\tI40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK\t= 
32,\n-\tI40E_FILTER_PCTYPE_NONF_IPV4_TCP\t\t= 33,\n-\tI40E_FILTER_PCTYPE_NONF_IPV4_SCTP\t\t= 34,\n-\tI40E_FILTER_PCTYPE_NONF_IPV4_OTHER\t\t= 35,\n-\tI40E_FILTER_PCTYPE_FRAG_IPV4\t\t\t= 36,\n+\tIAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP\t= 29,\n+\tIAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP\t= 30,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV4_UDP\t\t= 31,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK\t= 32,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV4_TCP\t\t= 33,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV4_SCTP\t\t= 34,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV4_OTHER\t\t= 35,\n+\tIAVF_FILTER_PCTYPE_FRAG_IPV4\t\t\t= 36,\n \t/* Note: Values 37-38 are reserved for future use.\n \t * Value 39, 40, 42 are not supported on XL710 and X710.\n \t */\n-\tI40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP\t= 39,\n-\tI40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP\t= 40,\n-\tI40E_FILTER_PCTYPE_NONF_IPV6_UDP\t\t= 41,\n-\tI40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK\t= 42,\n-\tI40E_FILTER_PCTYPE_NONF_IPV6_TCP\t\t= 43,\n-\tI40E_FILTER_PCTYPE_NONF_IPV6_SCTP\t\t= 44,\n-\tI40E_FILTER_PCTYPE_NONF_IPV6_OTHER\t\t= 45,\n-\tI40E_FILTER_PCTYPE_FRAG_IPV6\t\t\t= 46,\n+\tIAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP\t= 39,\n+\tIAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP\t= 40,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV6_UDP\t\t= 41,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK\t= 42,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV6_TCP\t\t= 43,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV6_SCTP\t\t= 44,\n+\tIAVF_FILTER_PCTYPE_NONF_IPV6_OTHER\t\t= 45,\n+\tIAVF_FILTER_PCTYPE_FRAG_IPV6\t\t\t= 46,\n \t/* Note: Value 47 is reserved for future use */\n-\tI40E_FILTER_PCTYPE_FCOE_OX\t\t\t= 48,\n-\tI40E_FILTER_PCTYPE_FCOE_RX\t\t\t= 49,\n-\tI40E_FILTER_PCTYPE_FCOE_OTHER\t\t\t= 50,\n+\tIAVF_FILTER_PCTYPE_FCOE_OX\t\t\t= 48,\n+\tIAVF_FILTER_PCTYPE_FCOE_RX\t\t\t= 49,\n+\tIAVF_FILTER_PCTYPE_FCOE_OTHER\t\t\t= 50,\n \t/* Note: Values 51-62 are reserved for future use */\n-\tI40E_FILTER_PCTYPE_L2_PAYLOAD\t\t\t= 63,\n+\tIAVF_FILTER_PCTYPE_L2_PAYLOAD\t\t\t= 63,\n };\n \n+#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT\t30\n+#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK\t(0x3FFFFULL << \\\n+\t\t\t\t\t IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)\n \n-struct i40e_vsi_context {\n-\tu16 seid;\n-\tu16 uplink_seid;\n-\tu16 vsi_number;\n-\tu16 vsis_allocated;\n-\tu16 vsis_unallocated;\n-\tu16 flags;\n-\tu8 pf_num;\n-\tu8 vf_num;\n-\tu8 connection_type;\n-\tstruct i40e_aqc_vsi_properties_data info;\n-};\n+#define IAVF_TXD_CTX_QW1_MSS_SHIFT\t50\n+#define IAVF_TXD_CTX_QW1_MSS_MASK\t(0x3FFFULL << \\\n+\t\t\t\t\t IAVF_TXD_CTX_QW1_MSS_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW1_VSI_SHIFT\t50\n+#define IAVF_TXD_CTX_QW1_VSI_MASK\t(0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT)\n \n-struct i40e_veb_context {\n-\tu16 seid;\n-\tu16 uplink_seid;\n-\tu16 veb_number;\n-\tu16 vebs_allocated;\n-\tu16 vebs_unallocated;\n-\tu16 flags;\n-\tstruct i40e_aqc_get_veb_parameters_completion info;\n+#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT\t0\n+#define IAVF_TXD_CTX_QW0_EXT_IP_MASK\t(0x3ULL << \\\n+\t\t\t\t\t IAVF_TXD_CTX_QW0_EXT_IP_SHIFT)\n+\n+enum iavf_tx_ctx_desc_eipt_offload {\n+\tIAVF_TX_CTX_EXT_IP_NONE\t\t= 0x0,\n+\tIAVF_TX_CTX_EXT_IP_IPV6\t\t= 0x1,\n+\tIAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM\t= 0x2,\n+\tIAVF_TX_CTX_EXT_IP_IPV4\t\t= 0x3\n };\n \n+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT\t2\n+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK\t(0x3FULL << \\\n+\t\t\t\t\t IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW0_NATT_SHIFT\t9\n+#define IAVF_TXD_CTX_QW0_NATT_MASK\t(0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)\n+\n+#define IAVF_TXD_CTX_UDP_TUNNELING\tBIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT)\n+#define 
IAVF_TXD_CTX_GRE_TUNNELING\t(0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT\t11\n+#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \\\n+\t\t\t\t       BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)\n+\n+#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST\tIAVF_TXD_CTX_QW0_EIP_NOINC_MASK\n+\n+#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT\t12\n+#define IAVF_TXD_CTX_QW0_NATLEN_MASK\t(0X7FULL << \\\n+\t\t\t\t\t IAVF_TXD_CTX_QW0_NATLEN_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT\t19\n+#define IAVF_TXD_CTX_QW0_DECTTL_MASK\t(0xFULL << \\\n+\t\t\t\t\t IAVF_TXD_CTX_QW0_DECTTL_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT\t23\n+#define IAVF_TXD_CTX_QW0_L4T_CS_MASK\tBIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)\n+\n /* Statistics collected by each port, VSI, VEB, and S-channel */\n-struct i40e_eth_stats {\n+struct iavf_eth_stats {\n \tu64 rx_bytes;\t\t\t/* gorc */\n \tu64 rx_unicast;\t\t\t/* uprc */\n \tu64 rx_multicast;\t\t/* mprc */\n@@ -710,4 +685,4 @@ struct i40e_eth_stats {\n \tu64 tx_discards;\t\t/* tdpc */\n \tu64 tx_errors;\t\t\t/* tepc */\n };\n-#endif /* _I40E_TYPE_H_ */\n+#endif /* _IAVF_TYPE_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h\nindex 8e1a9f0348fe..1d973b4cd973 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf.h\n+++ b/drivers/net/ethernet/intel/iavf/iavf.h\n@@ -43,19 +43,19 @@\n \n /* VSI state flags shared with common code */\n enum iavf_vsi_state_t {\n-\t__I40E_VSI_DOWN,\n+\t__IAVF_VSI_DOWN,\n \t/* This must be last as it determines the size of the BITMAP */\n-\t__I40E_VSI_STATE_SIZE__,\n+\t__IAVF_VSI_STATE_SIZE__,\n };\n \n /* dummy struct to make common code less painful */\n-struct i40e_vsi {\n+struct iavf_vsi {\n \tstruct iavf_adapter *back;\n \tstruct net_device *netdev;\n \tunsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];\n \tu16 seid;\n \tu16 id;\n-\tDECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);\n+\tDECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);\n \tint base_vector;\n \tu16 work_limit;\n \tu16 qs_handle;\n@@ -77,10 +77,10 @@ struct i40e_vsi {\n \n #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)\n \n-#define IAVF_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))\n-#define IAVF_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))\n+#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))\n+#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))\n #define IAVF_TX_CTXTDESC(R, i) \\\n-\t(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))\n+\t(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))\n #define IAVF_MAX_REQ_QUEUES 4\n \n #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)\n@@ -90,12 +90,12 @@ struct i40e_vsi {\n /* MAX_MSIX_Q_VECTORS of these are allocated,\n  * but we only use one per queue-specific vector.\n  */\n-struct i40e_q_vector {\n+struct iavf_q_vector {\n \tstruct iavf_adapter *adapter;\n-\tstruct i40e_vsi *vsi;\n+\tstruct iavf_vsi *vsi;\n \tstruct napi_struct napi;\n-\tstruct i40e_ring_container rx;\n-\tstruct i40e_ring_container tx;\n+\tstruct iavf_ring_container rx;\n+\tstruct iavf_ring_container tx;\n \tu32 ring_mask;\n \tu8 itr_countdown;\t/* when 0 should adjust adaptive ITR */\n \tu8 num_ringpairs;\t/* total number of ring pairs in vector */\n@@ -119,13 +119,6 @@ struct i40e_q_vector {\n \t((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \\\n \t(R)->next_to_clean - (R)->next_to_use - 1)\n \n-#define IAVF_RX_DESC_ADV(R, i)\t\\\n-\t(&(((union i40e_adv_rx_desc *)((R).desc))[i]))\n-#define IAVF_TX_DESC_ADV(R, i)\t\\\n-\t(&(((union i40e_adv_tx_desc *)((R).desc))[i]))\n-#define IAVF_TX_CTXTDESC_ADV(R, i)\t\\\n-\t(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))\n-\n #define OTHER_VECTOR 1\n #define NONQ_VECS (OTHER_VECTOR)\n \n@@ -209,7 +202,7 @@ enum iavf_critical_section_t {\n #define IAVF_CF_FLAGS_IMAC_IVLAN_TEN_ID\t(IAVF_CLOUD_FIELD_IMAC |\\\n \t\t\t\t\t\t IAVF_CLOUD_FIELD_IVLAN |\\\n \t\t\t\t\t\t IAVF_CLOUD_FIELD_TEN_ID)\n-#define IAVF_CF_FLAGS_IIP\tI40E_CLOUD_FIELD_IIP\n+#define IAVF_CF_FLAGS_IIP\tIAVF_CLOUD_FIELD_IIP\n \n /* bookkeeping of cloud filters */\n struct iavf_cloud_filter {\n@@ -229,7 +222,7 @@ struct iavf_adapter {\n \tstruct delayed_work client_task;\n \tstruct delayed_work init_task;\n \twait_queue_head_t down_waitqueue;\n-\tstruct i40e_q_vector *q_vectors;\n+\tstruct iavf_q_vector *q_vectors;\n \tstruct list_head vlan_filter_list;\n \tstruct list_head mac_filter_list;\n \t/* Lock to protect accesses to MAC and VLAN lists */\n@@ -239,12 +232,12 @@ struct iavf_adapter {\n \tint num_req_queues;\n \n \t/* TX */\n-\tstruct i40e_ring *tx_rings;\n+\tstruct iavf_ring *tx_rings;\n \tu32 tx_timeout_count;\n \tu32 tx_desc_count;\n \n \t/* RX */\n-\tstruct i40e_ring *rx_rings;\n+\tstruct iavf_ring *rx_rings;\n \tu64 hw_csum_rx_error;\n \tu32 rx_desc_count;\n \tint num_msix_vectors;\n@@ -271,9 +264,7 @@ struct iavf_adapter {\n #define IAVF_FLAG_REINIT_ITR_NEEDED\t\tBIT(16)\n #define IAVF_FLAG_QUEUES_DISABLED\t\tBIT(17)\n /* duplicates for common code */\n-#define I40E_FLAG_DCB_ENABLED\t\t\t0\n-#define I40E_FLAG_RX_CSUM_ENABLED\t\tIAVF_FLAG_RX_CSUM_ENABLED\n-#define I40E_FLAG_LEGACY_RX\t\t\tIAVF_FLAG_LEGACY_RX\n+#define IAVF_FLAG_DCB_ENABLED\t\t\t0\n \t/* flags for admin queue service task */\n \tu32 aq_required;\n #define IAVF_FLAG_AQ_ENABLE_QUEUES\t\tBIT(0)\n@@ -338,8 +329,8 @@ struct iavf_adapter {\n #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \\\n \t\t       ((_a)->pf_version.minor == 1))\n \tu16 msg_enable;\n-\tstruct i40e_eth_stats current_stats;\n-\tstruct i40e_vsi vsi;\n+\tstruct iavf_eth_stats current_stats;\n+\tstruct iavf_vsi vsi;\n \tu32 aq_wait_count;\n \t/* RSS stuff */\n \tu64 hena;\n@@ -359,7 +350,7 @@ struct iavf_adapter {\n \n /* Ethtool Private Flags */\n \n-/* lan device */\n+/* lan device, used by client interface */\n struct i40e_device {\n \tstruct list_head list;\n \tstruct iavf_adapter *vf;\n@@ -382,8 +373,8 @@ void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);\n void iavf_free_all_tx_resources(struct iavf_adapter *adapter);\n void iavf_free_all_rx_resources(struct iavf_adapter *adapter);\n \n-void i40e_napi_add_all(struct iavf_adapter *adapter);\n-void i40e_napi_del_all(struct iavf_adapter *adapter);\n+void iavf_napi_add_all(struct iavf_adapter *adapter);\n+void iavf_napi_del_all(struct iavf_adapter *adapter);\n \n int iavf_send_api_ver(struct iavf_adapter *adapter);\n int iavf_verify_api_ver(struct iavf_adapter *adapter);\n@@ -416,10 +407,10 @@ int iavf_config_rss(struct iavf_adapter *adapter);\n int iavf_lan_add_device(struct iavf_adapter *adapter);\n int iavf_lan_del_device(struct iavf_adapter *adapter);\n void iavf_client_subtask(struct iavf_adapter *adapter);\n-void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);\n-void iavf_notify_client_l2_params(struct i40e_vsi *vsi);\n-void iavf_notify_client_open(struct i40e_vsi 
*vsi);\n-void iavf_notify_client_close(struct i40e_vsi *vsi, bool reset);\n+void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len);\n+void iavf_notify_client_l2_params(struct iavf_vsi *vsi);\n+void iavf_notify_client_open(struct iavf_vsi *vsi);\n+void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset);\n void iavf_enable_channels(struct iavf_adapter *adapter);\n void iavf_disable_channels(struct iavf_adapter *adapter);\n void iavf_add_cloud_filter(struct iavf_adapter *adapter);\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c\nindex 16971bfc5e43..f4c195a4167a 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_client.c\n+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c\n@@ -11,7 +11,7 @@\n static\n const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;\n static struct i40e_client *vf_registered_client;\n-static LIST_HEAD(iavf_devices);\n+static LIST_HEAD(i40e_devices);\n static DEFINE_MUTEX(iavf_device_mutex);\n \n static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,\n@@ -33,7 +33,7 @@ static struct i40e_ops iavf_lan_ops = {\n  * @params: client param struct\n  **/\n static\n-void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)\n+void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)\n {\n \tint i;\n \n@@ -41,7 +41,7 @@ void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)\n \tparams->mtu = vsi->netdev->mtu;\n \tparams->link_up = vsi->back->link_up;\n \n-\tfor (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {\n+\tfor (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) {\n \t\tparams->qos.prio_qos[i].tc = 0;\n \t\tparams->qos.prio_qos[i].qs_handle = vsi->qs_handle;\n \t}\n@@ -55,7 +55,7 @@ void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)\n  *\n  * If there is a client to this VSI, call the client\n  **/\n-void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)\n+void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)\n {\n \tstruct i40e_client_instance *cinst;\n \n@@ -79,7 +79,7 @@ void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)\n  *\n  * If there is a client to this VSI, call the client\n  **/\n-void iavf_notify_client_l2_params(struct i40e_vsi *vsi)\n+void iavf_notify_client_l2_params(struct iavf_vsi *vsi)\n {\n \tstruct i40e_client_instance *cinst;\n \tstruct i40e_params params;\n@@ -107,7 +107,7 @@ void iavf_notify_client_l2_params(struct i40e_vsi *vsi)\n  *\n  * If there is a client to this netdev, call the client with open\n  **/\n-void iavf_notify_client_open(struct i40e_vsi *vsi)\n+void iavf_notify_client_open(struct iavf_vsi *vsi)\n {\n \tstruct iavf_adapter *adapter = vsi->back;\n \tstruct i40e_client_instance *cinst = adapter->cinst;\n@@ -159,7 +159,7 @@ static int iavf_client_release_qvlist(struct i40e_info *ldev)\n  *\n  * If there is a client to this netdev, call the client with close\n  **/\n-void iavf_notify_client_close(struct i40e_vsi *vsi, bool reset)\n+void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)\n {\n \tstruct iavf_adapter *adapter = vsi->back;\n \tstruct i40e_client_instance *cinst = adapter->cinst;\n@@ -185,7 +185,7 @@ static struct i40e_client_instance *\n iavf_client_add_instance(struct iavf_adapter *adapter)\n {\n \tstruct i40e_client_instance *cinst = NULL;\n-\tstruct i40e_vsi *vsi = &adapter->vsi;\n+\tstruct iavf_vsi *vsi = &adapter->vsi;\n \tstruct netdev_hw_addr *mac = NULL;\n \tstruct 
i40e_params params;\n \n@@ -295,7 +295,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter)\n \tint ret = 0;\n \n \tmutex_lock(&iavf_device_mutex);\n-\tlist_for_each_entry(ldev, &iavf_devices, list) {\n+\tlist_for_each_entry(ldev, &i40e_devices, list) {\n \t\tif (ldev->vf == adapter) {\n \t\t\tret = -EEXIST;\n \t\t\tgoto out;\n@@ -308,7 +308,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter)\n \t}\n \tldev->vf = adapter;\n \tINIT_LIST_HEAD(&ldev->list);\n-\tlist_add(&ldev->list, &iavf_devices);\n+\tlist_add(&ldev->list, &i40e_devices);\n \tdev_info(&adapter->pdev->dev, \"Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\\n\",\n \t\t adapter->hw.bus.bus_id, adapter->hw.bus.device,\n \t\t adapter->hw.bus.func);\n@@ -335,7 +335,7 @@ int iavf_lan_del_device(struct iavf_adapter *adapter)\n \tint ret = -ENODEV;\n \n \tmutex_lock(&iavf_device_mutex);\n-\tlist_for_each_entry_safe(ldev, tmp, &iavf_devices, list) {\n+\tlist_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {\n \t\tif (ldev->vf == adapter) {\n \t\t\tdev_info(&adapter->pdev->dev,\n \t\t\t\t \"Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\\n\",\n@@ -364,7 +364,7 @@ static void iavf_client_release(struct i40e_client *client)\n \tstruct iavf_adapter *adapter;\n \n \tmutex_lock(&iavf_device_mutex);\n-\tlist_for_each_entry(ldev, &iavf_devices, list) {\n+\tlist_for_each_entry(ldev, &i40e_devices, list) {\n \t\tadapter = ldev->vf;\n \t\tcinst = adapter->cinst;\n \t\tif (!cinst)\n@@ -398,7 +398,7 @@ static void iavf_client_prepare(struct i40e_client *client)\n \tstruct iavf_adapter *adapter;\n \n \tmutex_lock(&iavf_device_mutex);\n-\tlist_for_each_entry(ldev, &iavf_devices, list) {\n+\tlist_for_each_entry(ldev, &i40e_devices, list) {\n \t\tadapter = ldev->vf;\n \t\t/* Signal the watchdog to service the client */\n \t\tadapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c\nindex 74a142802074..314b8230336a 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c\n+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c\n@@ -9,57 +9,51 @@\n /* ethtool statistics helpers */\n \n /**\n- * struct i40e_stats - definition for an ethtool statistic\n+ * struct iavf_stats - definition for an ethtool statistic\n  * @stat_string: statistic name to display in ethtool -S output\n  * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)\n  * @stat_offset: offsetof() the stat from a base pointer\n  *\n  * This structure defines a statistic to be added to the ethtool stats buffer.\n  * It defines a statistic as offset from a common base pointer. Stats should\n- * be defined in constant arrays using the I40E_STAT macro, with every element\n+ * be defined in constant arrays using the IAVF_STAT macro, with every element\n  * of the array using the same _type for calculating the sizeof_stat and\n  * stat_offset.\n  *\n  * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or\n  * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from\n- * the i40e_add_ethtool_stat() helper function.\n+ * the iavf_add_ethtool_stat() helper function.\n  *\n  * The @stat_string is interpreted as a format string, allowing formatted\n  * values to be inserted while looping over multiple structures for a given\n  * statistics array. 
Thus, every statistic string in an array should have the\n  * same type and number of format specifiers, to be formatted by variadic\n- * arguments to the i40e_add_stat_string() helper function.\n+ * arguments to the iavf_add_stat_string() helper function.\n  **/\n-struct i40e_stats {\n+struct iavf_stats {\n \tchar stat_string[ETH_GSTRING_LEN];\n \tint sizeof_stat;\n \tint stat_offset;\n };\n \n-/* Helper macro to define an i40e_stat structure with proper size and type.\n+/* Helper macro to define an iavf_stat structure with proper size and type.\n  * Use this when defining constant statistics arrays. Note that @_type expects\n  * only a type name and is used multiple times.\n  */\n-#define I40E_STAT(_type, _name, _stat) { \\\n+#define IAVF_STAT(_type, _name, _stat) { \\\n \t.stat_string = _name, \\\n \t.sizeof_stat = FIELD_SIZEOF(_type, _stat), \\\n \t.stat_offset = offsetof(_type, _stat) \\\n }\n \n-/* Helper macro for defining some statistics directly copied from the netdev\n- * stats structure.\n- */\n-#define I40E_NETDEV_STAT(_net_stat) \\\n-\tI40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)\n-\n /* Helper macro for defining some statistics related to queues */\n-#define I40E_QUEUE_STAT(_name, _stat) \\\n-\tI40E_STAT(struct i40e_ring, _name, _stat)\n+#define IAVF_QUEUE_STAT(_name, _stat) \\\n+\tIAVF_STAT(struct iavf_ring, _name, _stat)\n \n /* Stats associated with a Tx or Rx ring */\n-static const struct i40e_stats i40e_gstrings_queue_stats[] = {\n-\tI40E_QUEUE_STAT(\"%s-%u.packets\", stats.packets),\n-\tI40E_QUEUE_STAT(\"%s-%u.bytes\", stats.bytes),\n+static const struct iavf_stats iavf_gstrings_queue_stats[] = {\n+\tIAVF_QUEUE_STAT(\"%s-%u.packets\", stats.packets),\n+\tIAVF_QUEUE_STAT(\"%s-%u.bytes\", stats.bytes),\n };\n \n /**\n@@ -69,12 +63,12 @@ static const struct i40e_stats i40e_gstrings_queue_stats[] = {\n  * @stat: the stat definition\n  *\n  * Copies the stat data defined by the pointer and stat structure pair into\n- * the memory supplied as data. Used to implement i40e_add_ethtool_stats and\n+ * the memory supplied as data. Used to implement iavf_add_ethtool_stats and\n  * iavf_add_queue_stats. 
If the pointer is null, data will be zero'd.\n  */\n static void\n iavf_add_one_ethtool_stat(u64 *data, void *pointer,\n-\t\t\t  const struct i40e_stats *stat)\n+\t\t\t  const struct iavf_stats *stat)\n {\n \tchar *p;\n \n@@ -122,7 +116,7 @@ iavf_add_one_ethtool_stat(u64 *data, void *pointer,\n  **/\n static void\n __iavf_add_ethtool_stats(u64 **data, void *pointer,\n-\t\t\t const struct i40e_stats stats[],\n+\t\t\t const struct iavf_stats stats[],\n \t\t\t const unsigned int size)\n {\n \tunsigned int i;\n@@ -132,7 +126,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,\n }\n \n /**\n- * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer\n+ * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer\n  * @data: ethtool stats buffer\n  * @pointer: location where stats are stored\n  * @stats: static const array of stat definitions\n@@ -144,7 +138,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,\n  * The parameter @stats is evaluated twice, so parameters with side effects\n  * should be avoided.\n  **/\n-#define i40e_add_ethtool_stats(data, pointer, stats) \\\n+#define iavf_add_ethtool_stats(data, pointer, stats) \\\n \t__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))\n \n /**\n@@ -153,8 +147,8 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,\n  * @ring: the ring to copy\n  *\n  * Queue statistics must be copied while protected by\n- * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.\n- * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the\n+ * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.\n+ * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the\n  * ring pointer is null, zero out the queue stat values and update the data\n  * pointer. 
Otherwise safely copy the stats from the ring into the supplied\n  * buffer and update the data pointer when finished.\n@@ -162,10 +156,10 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,\n  * This function expects to be called while under rcu_read_lock().\n  **/\n static void\n-iavf_add_queue_stats(u64 **data, struct i40e_ring *ring)\n+iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)\n {\n-\tconst unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats);\n-\tconst struct i40e_stats *stats = i40e_gstrings_queue_stats;\n+\tconst unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);\n+\tconst struct iavf_stats *stats = iavf_gstrings_queue_stats;\n \tunsigned int start;\n \tunsigned int i;\n \n@@ -185,7 +179,7 @@ iavf_add_queue_stats(u64 **data, struct i40e_ring *ring)\n }\n \n /**\n- * __i40e_add_stat_strings - copy stat strings into ethtool buffer\n+ * __iavf_add_stat_strings - copy stat strings into ethtool buffer\n  * @p: ethtool supplied buffer\n  * @stats: stat definitions array\n  * @size: size of the stats array\n@@ -193,7 +187,7 @@ iavf_add_queue_stats(u64 **data, struct i40e_ring *ring)\n  * Format and copy the strings described by stats into the buffer pointed at\n  * by p.\n  **/\n-static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],\n+static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],\n \t\t\t\t    const unsigned int size, ...)\n {\n \tunsigned int i;\n@@ -209,7 +203,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],\n }\n \n /**\n- * i40e_add_stat_strings - copy stat strings into ethtool buffer\n+ * iavf_add_stat_strings - copy stat strings into ethtool buffer\n  * @p: ethtool supplied buffer\n  * @stats: stat definitions array\n  *\n@@ -220,30 +214,30 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],\n  * should be avoided. Additionally, stats must be an array such that\n  * ARRAY_SIZE can be called on it.\n  **/\n-#define i40e_add_stat_strings(p, stats, ...) \\\n-\t__i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)\n-\n-#define IAVF_STAT(_name, _stat) \\\n-\tI40E_STAT(struct iavf_adapter, _name, _stat)\n-\n-static const struct i40e_stats iavf_gstrings_stats[] = {\n-\tIAVF_STAT(\"rx_bytes\", current_stats.rx_bytes),\n-\tIAVF_STAT(\"rx_unicast\", current_stats.rx_unicast),\n-\tIAVF_STAT(\"rx_multicast\", current_stats.rx_multicast),\n-\tIAVF_STAT(\"rx_broadcast\", current_stats.rx_broadcast),\n-\tIAVF_STAT(\"rx_discards\", current_stats.rx_discards),\n-\tIAVF_STAT(\"rx_unknown_protocol\", current_stats.rx_unknown_protocol),\n-\tIAVF_STAT(\"tx_bytes\", current_stats.tx_bytes),\n-\tIAVF_STAT(\"tx_unicast\", current_stats.tx_unicast),\n-\tIAVF_STAT(\"tx_multicast\", current_stats.tx_multicast),\n-\tIAVF_STAT(\"tx_broadcast\", current_stats.tx_broadcast),\n-\tIAVF_STAT(\"tx_discards\", current_stats.tx_discards),\n-\tIAVF_STAT(\"tx_errors\", current_stats.tx_errors),\n+#define iavf_add_stat_strings(p, stats, ...) 
\\\n+\t__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)\n+\n+#define VF_STAT(_name, _stat) \\\n+\tIAVF_STAT(struct iavf_adapter, _name, _stat)\n+\n+static const struct iavf_stats iavf_gstrings_stats[] = {\n+\tVF_STAT(\"rx_bytes\", current_stats.rx_bytes),\n+\tVF_STAT(\"rx_unicast\", current_stats.rx_unicast),\n+\tVF_STAT(\"rx_multicast\", current_stats.rx_multicast),\n+\tVF_STAT(\"rx_broadcast\", current_stats.rx_broadcast),\n+\tVF_STAT(\"rx_discards\", current_stats.rx_discards),\n+\tVF_STAT(\"rx_unknown_protocol\", current_stats.rx_unknown_protocol),\n+\tVF_STAT(\"tx_bytes\", current_stats.tx_bytes),\n+\tVF_STAT(\"tx_unicast\", current_stats.tx_unicast),\n+\tVF_STAT(\"tx_multicast\", current_stats.tx_multicast),\n+\tVF_STAT(\"tx_broadcast\", current_stats.tx_broadcast),\n+\tVF_STAT(\"tx_discards\", current_stats.tx_discards),\n+\tVF_STAT(\"tx_errors\", current_stats.tx_errors),\n };\n \n #define IAVF_STATS_LEN\tARRAY_SIZE(iavf_gstrings_stats)\n \n-#define IAVF_QUEUE_STATS_LEN\tARRAY_SIZE(i40e_gstrings_queue_stats)\n+#define IAVF_QUEUE_STATS_LEN\tARRAY_SIZE(iavf_gstrings_queue_stats)\n \n /* For now we have one and only one private flag and it is only defined\n  * when we have support for the SKIP_CPU_SYNC DMA attribute.  Instead\n@@ -349,11 +343,11 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,\n \tstruct iavf_adapter *adapter = netdev_priv(netdev);\n \tunsigned int i;\n \n-\ti40e_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);\n+\tiavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);\n \n \trcu_read_lock();\n \tfor (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {\n-\t\tstruct i40e_ring *ring;\n+\t\tstruct iavf_ring *ring;\n \n \t\t/* Avoid accessing un-allocated queues */\n \t\tring = (i < adapter->num_active_queues ?\n@@ -397,15 +391,15 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)\n {\n \tunsigned int i;\n \n-\ti40e_add_stat_strings(&data, iavf_gstrings_stats);\n+\tiavf_add_stat_strings(&data, iavf_gstrings_stats);\n \n \t/* Queues are always allocated in pairs, so we just use num_tx_queues\n \t * for both Tx and Rx queues.\n \t */\n \tfor (i = 0; i < netdev->num_tx_queues; i++) {\n-\t\ti40e_add_stat_strings(&data, i40e_gstrings_queue_stats,\n+\t\tiavf_add_stat_strings(&data, iavf_gstrings_queue_stats,\n \t\t\t\t      \"tx\", i);\n-\t\ti40e_add_stat_strings(&data, i40e_gstrings_queue_stats,\n+\t\tiavf_add_stat_strings(&data, iavf_gstrings_queue_stats,\n \t\t\t\t      \"rx\", i);\n \t}\n }\n@@ -437,7 +431,7 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)\n  * @netdev: network interface device structure\n  *\n  * The get string set count and the string set should be matched for each\n- * flag returned.  Add new strings for each flag to the i40e_gstrings_priv_flags\n+ * flag returned.  
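\n/* A standalone sketch of the priv-flags bitmap pattern this comment describes (illustrative only: demo_priv_flag, the \"legacy-rx\" entry and the flag value are stand-ins for the driver's table, not taken from the patch). Bit i of the returned u32 reports whether the i-th advertised flag is set, which is why the string-set count and the flag count must match. */\n#include <stdint.h>\n\nstruct demo_priv_flag {\n\tconst char *name;\n\tuint32_t flag;\n};\n\n#define DEMO_FLAG_LEGACY_RX\t(1u << 15)\t/* invented bit for this sketch */\n\nstatic const struct demo_priv_flag demo_priv_flags[] = {\n\t{ \"legacy-rx\", DEMO_FLAG_LEGACY_RX },\n};\n\nstatic uint32_t demo_get_priv_flags(uint32_t adapter_flags)\n{\n\tuint32_t i, ret = 0;\n\n\tfor (i = 0; i < sizeof(demo_priv_flags) / sizeof(demo_priv_flags[0]); i++)\n\t\tif (adapter_flags & demo_priv_flags[i].flag)\n\t\t\tret |= 1u << i;\t/* string i <-> bit i */\n\treturn ret;\n}\n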
Add new strings for each flag to the iavf_gstrings_priv_flags\n  * array.\n  *\n  * Returns a u32 bitmap of flags.\n@@ -548,7 +542,7 @@ static void iavf_set_msglevel(struct net_device *netdev, u32 data)\n {\n \tstruct iavf_adapter *adapter = netdev_priv(netdev);\n \n-\tif (I40E_DEBUG_USER & data)\n+\tif (IAVF_DEBUG_USER & data)\n \t\tadapter->hw.debug_mask = data;\n \tadapter->msg_enable = data;\n }\n@@ -648,8 +642,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,\n \t\t\t       struct ethtool_coalesce *ec, int queue)\n {\n \tstruct iavf_adapter *adapter = netdev_priv(netdev);\n-\tstruct i40e_vsi *vsi = &adapter->vsi;\n-\tstruct i40e_ring *rx_ring, *tx_ring;\n+\tstruct iavf_vsi *vsi = &adapter->vsi;\n+\tstruct iavf_ring *rx_ring, *tx_ring;\n \n \tec->tx_max_coalesced_frames = vsi->work_limit;\n \tec->rx_max_coalesced_frames = vsi->work_limit;\n@@ -671,8 +665,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,\n \tif (ITR_IS_DYNAMIC(tx_ring->itr_setting))\n \t\tec->use_adaptive_tx_coalesce = 1;\n \n-\tec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;\n-\tec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;\n+\tec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;\n+\tec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;\n \n \treturn 0;\n }\n@@ -718,20 +712,20 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,\n static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,\n \t\t\t\t   struct ethtool_coalesce *ec, int queue)\n {\n-\tstruct i40e_ring *rx_ring = &adapter->rx_rings[queue];\n-\tstruct i40e_ring *tx_ring = &adapter->tx_rings[queue];\n-\tstruct i40e_q_vector *q_vector;\n+\tstruct iavf_ring *rx_ring = &adapter->rx_rings[queue];\n+\tstruct iavf_ring *tx_ring = &adapter->tx_rings[queue];\n+\tstruct iavf_q_vector *q_vector;\n \n \trx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);\n \ttx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);\n \n-\trx_ring->itr_setting |= I40E_ITR_DYNAMIC;\n+\trx_ring->itr_setting |= IAVF_ITR_DYNAMIC;\n \tif (!ec->use_adaptive_rx_coalesce)\n-\t\trx_ring->itr_setting ^= I40E_ITR_DYNAMIC;\n+\t\trx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;\n \n-\ttx_ring->itr_setting |= I40E_ITR_DYNAMIC;\n+\ttx_ring->itr_setting |= IAVF_ITR_DYNAMIC;\n \tif (!ec->use_adaptive_tx_coalesce)\n-\t\ttx_ring->itr_setting ^= I40E_ITR_DYNAMIC;\n+\t\ttx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;\n \n \tq_vector = rx_ring->q_vector;\n \tq_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);\n@@ -757,7 +751,7 @@ static int __iavf_set_coalesce(struct net_device *netdev,\n \t\t\t       struct ethtool_coalesce *ec, int queue)\n {\n \tstruct iavf_adapter *adapter = netdev_priv(netdev);\n-\tstruct i40e_vsi *vsi = &adapter->vsi;\n+\tstruct iavf_vsi *vsi = &adapter->vsi;\n \tint i;\n \n \tif (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)\n@@ -766,15 +760,15 @@ static int __iavf_set_coalesce(struct net_device *netdev,\n \tif (ec->rx_coalesce_usecs == 0) {\n \t\tif (ec->use_adaptive_rx_coalesce)\n \t\t\tnetif_info(adapter, drv, netdev, \"rx-usecs=0, need to disable adaptive-rx for a complete disable\\n\");\n-\t} else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) ||\n-\t\t   (ec->rx_coalesce_usecs > I40E_MAX_ITR)) {\n+\t} else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||\n+\t\t   (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {\n \t\tnetif_info(adapter, drv, netdev, \"Invalid value, rx-usecs range is 0-8160\\n\");\n \t\treturn -EINVAL;\n \t} else if 
(ec->tx_coalesce_usecs == 0) {\n \t\tif (ec->use_adaptive_tx_coalesce)\n \t\t\tnetif_info(adapter, drv, netdev, \"tx-usecs=0, need to disable adaptive-tx for a complete disable\\n\");\n-\t} else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) ||\n-\t\t   (ec->tx_coalesce_usecs > I40E_MAX_ITR)) {\n+\t} else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||\n+\t\t   (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {\n \t\tnetif_info(adapter, drv, netdev, \"Invalid value, tx-usecs range is 0-8160\\n\");\n \t\treturn -EINVAL;\n \t}\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c\nindex b8edf43e36f1..74b547634f48 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_main.c\n+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c\n@@ -67,8 +67,8 @@ static struct workqueue_struct *iavf_wq;\n  * @alignment: what to align the allocation to\n  **/\n iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,\n-\t\t\t\t      struct i40e_dma_mem *mem,\n-\t\t\t\t      u64 size, u32 alignment)\n+\t\t\t\t    struct iavf_dma_mem *mem,\n+\t\t\t\t    u64 size, u32 alignment)\n {\n \tstruct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;\n \n@@ -89,7 +89,7 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,\n  * @hw:   pointer to the HW structure\n  * @mem:  ptr to mem struct to free\n  **/\n-iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct i40e_dma_mem *mem)\n+iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)\n {\n \tstruct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;\n \n@@ -107,7 +107,7 @@ iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct i40e_dma_mem *mem)\n  * @size: size of memory requested\n  **/\n iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,\n-\t\t\t\t       struct i40e_virt_mem *mem, u32 size)\n+\t\t\t\t     struct iavf_virt_mem *mem, u32 size)\n {\n \tif (!mem)\n \t\treturn I40E_ERR_PARAM;\n@@ -126,8 +126,7 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,\n  * @hw:   pointer to the HW structure\n  * @mem:  ptr to mem struct to free\n  **/\n-iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,\n-\t\t\t\t   struct i40e_virt_mem *mem)\n+iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)\n {\n \tif (!mem)\n \t\treturn I40E_ERR_PARAM;\n@@ -300,7 +299,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)\n  **/\n static irqreturn_t iavf_msix_clean_rings(int irq, void *data)\n {\n-\tstruct i40e_q_vector *q_vector = data;\n+\tstruct iavf_q_vector *q_vector = data;\n \n \tif (!q_vector->tx.ring && !q_vector->rx.ring)\n \t\treturn IRQ_HANDLED;\n@@ -319,8 +318,8 @@ static irqreturn_t iavf_msix_clean_rings(int irq, void *data)\n static void\n iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)\n {\n-\tstruct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];\n-\tstruct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];\n+\tstruct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];\n+\tstruct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];\n \tstruct iavf_hw *hw = &adapter->hw;\n \n \trx_ring->q_vector = q_vector;\n@@ -331,7 +330,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)\n \tq_vector->rx.next_update = jiffies + 1;\n \tq_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);\n \tq_vector->ring_mask |= BIT(r_idx);\n-\twr32(hw, IAVF_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),\n+\twr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),\n \t     q_vector->rx.current_itr);\n 
\tq_vector->rx.current_itr = q_vector->rx.target_itr;\n }\n@@ -345,8 +344,8 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)\n static void\n iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)\n {\n-\tstruct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];\n-\tstruct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];\n+\tstruct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];\n+\tstruct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];\n \tstruct iavf_hw *hw = &adapter->hw;\n \n \ttx_ring->q_vector = q_vector;\n@@ -357,7 +356,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)\n \tq_vector->tx.next_update = jiffies + 1;\n \tq_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);\n \tq_vector->num_ringpairs++;\n-\twr32(hw, IAVF_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),\n+\twr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),\n \t     q_vector->tx.target_itr);\n \tq_vector->tx.current_itr = q_vector->tx.target_itr;\n }\n@@ -409,7 +408,7 @@ static void iavf_netpoll(struct net_device *netdev)\n \tint i;\n \n \t/* if interface is down do nothing */\n-\tif (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))\n+\tif (test_bit(__IAVF_VSI_DOWN, adapter->vsi.state))\n \t\treturn;\n \n \tfor (i = 0; i < q_vectors; i++)\n@@ -428,8 +427,8 @@ static void iavf_netpoll(struct net_device *netdev)\n static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,\n \t\t\t\t       const cpumask_t *mask)\n {\n-\tstruct i40e_q_vector *q_vector =\n-\t\tcontainer_of(notify, struct i40e_q_vector, affinity_notify);\n+\tstruct iavf_q_vector *q_vector =\n+\t\tcontainer_of(notify, struct iavf_q_vector, affinity_notify);\n \n \tcpumask_copy(&q_vector->affinity_mask, mask);\n }\n@@ -465,7 +464,7 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)\n \tq_vectors = adapter->num_msix_vectors - NONQ_VECS;\n \n \tfor (vector = 0; vector < q_vectors; vector++) {\n-\t\tstruct i40e_q_vector *q_vector = &adapter->q_vectors[vector];\n+\t\tstruct iavf_q_vector *q_vector = &adapter->q_vectors[vector];\n \n \t\tirq_num = adapter->msix_entries[vector + NONQ_VECS].vector;\n \n@@ -609,7 +608,7 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)\n  **/\n static void iavf_configure_rx(struct iavf_adapter *adapter)\n {\n-\tunsigned int rx_buf_len = I40E_RXBUFFER_2048;\n+\tunsigned int rx_buf_len = IAVF_RXBUFFER_2048;\n \tstruct iavf_hw *hw = &adapter->hw;\n \tint i;\n \n@@ -622,15 +621,15 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)\n \t\t * an order 1 page, so we might as well increase the size\n \t\t * of our Rx buffer to make better use of the available space\n \t\t */\n-\t\trx_buf_len = I40E_RXBUFFER_3072;\n+\t\trx_buf_len = IAVF_RXBUFFER_3072;\n \n \t\t/* We use a 1536 buffer size for configurations with\n \t\t * standard Ethernet mtu.  
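\n/* A worked check of the sizing claim in this comment (standalone sketch; the 320-byte skb_shared_info size is an assumed x86_64 figure, and NET_IP_ALIGN is 0 on x86, so it drops out). A 1536-byte buffer carved out of a 2048-byte half page leaves exactly the 192 bytes of padding mentioned below. */\n#include <stdio.h>\n\nint main(void)\n{\n\tconst int half_page = 2048;\t/* the IAVF_RXBUFFER_2048 truesize budget */\n\tconst int rx_buf = 1536;\t/* IAVF_RXBUFFER_1536 */\n\tconst int shinfo = 320;\t\t/* approx sizeof(struct skb_shared_info) on x86_64 (assumption) */\n\n\tprintf(\"padding left: %d bytes\\n\", half_page - rx_buf - shinfo);\t/* prints 192 */\n\treturn 0;\n}\n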
On x86 this gives us enough room\n \t\t * for shared info and 192 bytes of padding.\n \t\t */\n-\t\tif (!I40E_2K_TOO_SMALL_WITH_PADDING &&\n+\t\tif (!IAVF_2K_TOO_SMALL_WITH_PADDING &&\n \t\t    (netdev->mtu <= ETH_DATA_LEN))\n-\t\t\trx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;\n+\t\t\trx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;\n \t}\n #endif\n \n@@ -779,7 +778,7 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,\n }\n \n /**\n- * i40e_add_filter - Add a mac filter to the filter list\n+ * iavf_add_filter - Add a mac filter to the filter list\n  * @adapter: board private structure\n  * @macaddr: the MAC address\n  *\n@@ -937,7 +936,7 @@ static void iavf_set_rx_mode(struct net_device *netdev)\n static void iavf_napi_enable_all(struct iavf_adapter *adapter)\n {\n \tint q_idx;\n-\tstruct i40e_q_vector *q_vector;\n+\tstruct iavf_q_vector *q_vector;\n \tint q_vectors = adapter->num_msix_vectors - NONQ_VECS;\n \n \tfor (q_idx = 0; q_idx < q_vectors; q_idx++) {\n@@ -956,7 +955,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)\n static void iavf_napi_disable_all(struct iavf_adapter *adapter)\n {\n \tint q_idx;\n-\tstruct i40e_q_vector *q_vector;\n+\tstruct iavf_q_vector *q_vector;\n \tint q_vectors = adapter->num_msix_vectors - NONQ_VECS;\n \n \tfor (q_idx = 0; q_idx < q_vectors; q_idx++) {\n@@ -981,9 +980,9 @@ static void iavf_configure(struct iavf_adapter *adapter)\n \tadapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;\n \n \tfor (i = 0; i < adapter->num_active_queues; i++) {\n-\t\tstruct i40e_ring *ring = &adapter->rx_rings[i];\n+\t\tstruct iavf_ring *ring = &adapter->rx_rings[i];\n \n-\t\tiavf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));\n+\t\tiavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));\n \t}\n }\n \n@@ -996,7 +995,7 @@ static void iavf_configure(struct iavf_adapter *adapter)\n static void iavf_up_complete(struct iavf_adapter *adapter)\n {\n \tadapter->state = __IAVF_RUNNING;\n-\tclear_bit(__I40E_VSI_DOWN, adapter->vsi.state);\n+\tclear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);\n \n \tiavf_napi_enable_all(adapter);\n \n@@ -1007,7 +1006,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)\n }\n \n /**\n- * i40e_down - Shutdown the connection processing\n+ * iavf_down - Shutdown the connection processing\n  * @adapter: board private structure\n  *\n  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.\n@@ -1159,17 +1158,17 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)\n \n \n \tadapter->tx_rings = kcalloc(num_active_queues,\n-\t\t\t\t    sizeof(struct i40e_ring), GFP_KERNEL);\n+\t\t\t\t    sizeof(struct iavf_ring), GFP_KERNEL);\n \tif (!adapter->tx_rings)\n \t\tgoto err_out;\n \tadapter->rx_rings = kcalloc(num_active_queues,\n-\t\t\t\t    sizeof(struct i40e_ring), GFP_KERNEL);\n+\t\t\t\t    sizeof(struct iavf_ring), GFP_KERNEL);\n \tif (!adapter->rx_rings)\n \t\tgoto err_out;\n \n \tfor (i = 0; i < num_active_queues; i++) {\n-\t\tstruct i40e_ring *tx_ring;\n-\t\tstruct i40e_ring *rx_ring;\n+\t\tstruct iavf_ring *tx_ring;\n+\t\tstruct iavf_ring *rx_ring;\n \n \t\ttx_ring = &adapter->tx_rings[i];\n \n@@ -1177,16 +1176,16 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)\n \t\ttx_ring->netdev = adapter->netdev;\n \t\ttx_ring->dev = &adapter->pdev->dev;\n \t\ttx_ring->count = adapter->tx_desc_count;\n-\t\ttx_ring->itr_setting = I40E_ITR_TX_DEF;\n+\t\ttx_ring->itr_setting = IAVF_ITR_TX_DEF;\n \t\tif (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)\n-\t\t\ttx_ring->flags |= 
I40E_TXR_FLAGS_WB_ON_ITR;\n+\t\t\ttx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;\n \n \t\trx_ring = &adapter->rx_rings[i];\n \t\trx_ring->queue_index = i;\n \t\trx_ring->netdev = adapter->netdev;\n \t\trx_ring->dev = &adapter->pdev->dev;\n \t\trx_ring->count = adapter->rx_desc_count;\n-\t\trx_ring->itr_setting = I40E_ITR_RX_DEF;\n+\t\trx_ring->itr_setting = IAVF_ITR_RX_DEF;\n \t}\n \n \tadapter->num_active_queues = num_active_queues;\n@@ -1244,7 +1243,7 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)\n }\n \n /**\n- * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands\n+ * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands\n  * @adapter: board private structure\n  *\n  * Return 0 on success, negative on failure\n@@ -1356,9 +1355,9 @@ static int iavf_init_rss(struct iavf_adapter *adapter)\n \t\t/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */\n \t\tif (adapter->vf_res->vf_cap_flags &\n \t\t    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)\n-\t\t\tadapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;\n+\t\t\tadapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;\n \t\telse\n-\t\t\tadapter->hena = I40E_DEFAULT_RSS_HENA;\n+\t\t\tadapter->hena = IAVF_DEFAULT_RSS_HENA;\n \n \t\twr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);\n \t\twr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));\n@@ -1381,7 +1380,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)\n static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)\n {\n \tint q_idx = 0, num_q_vectors;\n-\tstruct i40e_q_vector *q_vector;\n+\tstruct iavf_q_vector *q_vector;\n \n \tnum_q_vectors = adapter->num_msix_vectors - NONQ_VECS;\n \tadapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),\n@@ -1423,7 +1422,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)\n \tnapi_vectors = adapter->num_active_queues;\n \n \tfor (q_idx = 0; q_idx < num_q_vectors; q_idx++) {\n-\t\tstruct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];\n+\t\tstruct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];\n \n \t\tif (q_idx < napi_vectors)\n \t\t\tnetif_napi_del(&q_vector->napi);\n@@ -1543,7 +1542,7 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)\n \tif (err)\n \t\tgoto err;\n \n-\tset_bit(__I40E_VSI_DOWN, adapter->vsi.state);\n+\tset_bit(__IAVF_VSI_DOWN, adapter->vsi.state);\n \n \tiavf_map_rings_to_vectors(adapter);\n \n@@ -1783,7 +1782,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)\n \t * tasks have finished, since we're not holding the rtnl_lock here.\n \t */\n \tif (adapter->state == __IAVF_RUNNING) {\n-\t\tset_bit(__I40E_VSI_DOWN, adapter->vsi.state);\n+\t\tset_bit(__IAVF_VSI_DOWN, adapter->vsi.state);\n \t\tnetif_carrier_off(adapter->netdev);\n \t\tnetif_tx_disable(adapter->netdev);\n \t\tadapter->link_up = false;\n@@ -3057,7 +3056,7 @@ static int iavf_close(struct net_device *netdev)\n \t\t\t\t&adapter->crit_section))\n \t\tusleep_range(500, 1000);\n \n-\tset_bit(__I40E_VSI_DOWN, adapter->vsi.state);\n+\tset_bit(__IAVF_VSI_DOWN, adapter->vsi.state);\n \tif (CLIENT_ENABLED(adapter))\n \t\tadapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;\n \n@@ -3109,7 +3108,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)\n }\n \n /**\n- * i40e_set_features - set the netdev feature flags\n+ * iavf_set_features - set the netdev feature flags\n  * @netdev: ptr to the netdev being adjusted\n  * @features: the feature set that the stack is suggesting\n  * Note: expects to be called while under rtnl_lock()\n@@ -3269,7 +3268,7 @@ int 
iavf_process_config(struct iavf_adapter *adapter)\n \tstruct virtchnl_vf_resource *vfres = adapter->vf_res;\n \tint i, num_req_queues = adapter->num_req_queues;\n \tstruct net_device *netdev = adapter->netdev;\n-\tstruct i40e_vsi *vsi = &adapter->vsi;\n+\tstruct iavf_vsi *vsi = &adapter->vsi;\n \tnetdev_features_t hw_enc_features;\n \tnetdev_features_t hw_features;\n \n@@ -3382,7 +3381,7 @@ int iavf_process_config(struct iavf_adapter *adapter)\n \n \tadapter->vsi.back = adapter;\n \tadapter->vsi.base_vector = 1;\n-\tadapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;\n+\tadapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;\n \tvsi->netdev = adapter->netdev;\n \tvsi->qs_handle = adapter->vsi_res->qset_handle;\n \tif (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {\n@@ -3423,7 +3422,7 @@ static void iavf_init_task(struct work_struct *work)\n \t\t/* driver loaded, probe complete */\n \t\tadapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;\n \t\tadapter->flags &= ~IAVF_FLAG_RESET_PENDING;\n-\t\terr = i40e_set_mac_type(hw);\n+\t\terr = iavf_set_mac_type(hw);\n \t\tif (err) {\n \t\t\tdev_err(&pdev->dev, \"Failed to set MAC type (%d)\\n\",\n \t\t\t\terr);\n@@ -3487,7 +3486,7 @@ static void iavf_init_task(struct work_struct *work)\n \t\t/* aq msg sent, awaiting reply */\n \t\tif (!adapter->vf_res) {\n \t\t\tbufsz = sizeof(struct virtchnl_vf_resource) +\n-\t\t\t\t(I40E_MAX_VF_VSI *\n+\t\t\t\t(IAVF_MAX_VF_VSI *\n \t\t\t\t sizeof(struct virtchnl_vsi_resource));\n \t\t\tadapter->vf_res = kzalloc(bufsz, GFP_KERNEL);\n \t\t\tif (!adapter->vf_res)\n@@ -3529,7 +3528,7 @@ static void iavf_init_task(struct work_struct *work)\n \n \t/* MTU range: 68 - 9710 */\n \tnetdev->min_mtu = ETH_MIN_MTU;\n-\tnetdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;\n+\tnetdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;\n \n \tif (!is_valid_ether_addr(adapter->hw.mac.addr)) {\n \t\tdev_info(&pdev->dev, \"Invalid MAC address %pM, using random\\n\",\n@@ -3583,7 +3582,7 @@ static void iavf_init_task(struct work_struct *work)\n \t\tdev_info(&pdev->dev, \"GRO is enabled\\n\");\n \n \tadapter->state = __IAVF_DOWN;\n-\tset_bit(__I40E_VSI_DOWN, adapter->vsi.state);\n+\tset_bit(__IAVF_VSI_DOWN, adapter->vsi.state);\n \tiavf_misc_irq_enable(adapter);\n \twake_up(&adapter->down_waitqueue);\n \n@@ -3969,9 +3968,9 @@ static struct pci_driver iavf_driver = {\n };\n \n /**\n- * i40e_init_module - Driver Registration Routine\n+ * iavf_init_module - Driver Registration Routine\n  *\n- * i40e_init_module is the first routine called when the driver is\n+ * iavf_init_module is the first routine called when the driver is\n  * loaded. 
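\n/* A minimal module-registration skeleton matching the pattern this comment describes (an illustrative sketch, not the driver's code: demo_driver and the \"demo\" name are placeholders, and the PCI id table, probe and remove callbacks are omitted). */\n#include <linux/module.h>\n#include <linux/pci.h>\n\nstatic struct pci_driver demo_driver = {\n\t.name = \"demo\",\n\t/* .id_table, .probe and .remove left out of this sketch */\n};\n\nstatic int __init demo_init_module(void)\n{\n\t/* all the init routine needs to do is register with the PCI subsystem */\n\treturn pci_register_driver(&demo_driver);\n}\nmodule_init(demo_init_module);\n\nstatic void __exit demo_exit_module(void)\n{\n\tpci_unregister_driver(&demo_driver);\n}\nmodule_exit(demo_exit_module);\n\nMODULE_LICENSE(\"GPL\");\n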
All it does is register with the PCI subsystem.\n  **/\n static int __init iavf_init_module(void)\n@@ -3996,9 +3995,9 @@ static int __init iavf_init_module(void)\n module_init(iavf_init_module);\n \n /**\n- * i40e_exit_module - Driver Exit Cleanup Routine\n+ * iavf_exit_module - Driver Exit Cleanup Routine\n  *\n- * i40e_exit_module is called just before the driver is removed\n+ * iavf_exit_module is called just before the driver is removed\n  * from memory.\n  **/\n static void __exit iavf_exit_module(void)\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h\nindex 24f34d79f20a..1474f5539751 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h\n+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h\n@@ -59,9 +59,9 @@\n DECLARE_EVENT_CLASS(\n \tiavf_tx_template,\n \n-\tTP_PROTO(struct i40e_ring *ring,\n-\t\t struct i40e_tx_desc *desc,\n-\t\t struct i40e_tx_buffer *buf),\n+\tTP_PROTO(struct iavf_ring *ring,\n+\t\t struct iavf_tx_desc *desc,\n+\t\t struct iavf_tx_buffer *buf),\n \n \tTP_ARGS(ring, desc, buf),\n \n@@ -94,25 +94,25 @@ DECLARE_EVENT_CLASS(\n \n DEFINE_EVENT(\n \tiavf_tx_template, iavf_clean_tx_irq,\n-\tTP_PROTO(struct i40e_ring *ring,\n-\t\t struct i40e_tx_desc *desc,\n-\t\t struct i40e_tx_buffer *buf),\n+\tTP_PROTO(struct iavf_ring *ring,\n+\t\t struct iavf_tx_desc *desc,\n+\t\t struct iavf_tx_buffer *buf),\n \n \tTP_ARGS(ring, desc, buf));\n \n DEFINE_EVENT(\n \tiavf_tx_template, iavf_clean_tx_irq_unmap,\n-\tTP_PROTO(struct i40e_ring *ring,\n-\t\t struct i40e_tx_desc *desc,\n-\t\t struct i40e_tx_buffer *buf),\n+\tTP_PROTO(struct iavf_ring *ring,\n+\t\t struct iavf_tx_desc *desc,\n+\t\t struct iavf_tx_buffer *buf),\n \n \tTP_ARGS(ring, desc, buf));\n \n DECLARE_EVENT_CLASS(\n \tiavf_rx_template,\n \n-\tTP_PROTO(struct i40e_ring *ring,\n-\t\t union i40e_32byte_rx_desc *desc,\n+\tTP_PROTO(struct iavf_ring *ring,\n+\t\t union iavf_32byte_rx_desc *desc,\n \t\t struct sk_buff *skb),\n \n \tTP_ARGS(ring, desc, skb),\n@@ -139,16 +139,16 @@ DECLARE_EVENT_CLASS(\n \n DEFINE_EVENT(\n \tiavf_rx_template, iavf_clean_rx_irq,\n-\tTP_PROTO(struct i40e_ring *ring,\n-\t\t union i40e_32byte_rx_desc *desc,\n+\tTP_PROTO(struct iavf_ring *ring,\n+\t\t union iavf_32byte_rx_desc *desc,\n \t\t struct sk_buff *skb),\n \n \tTP_ARGS(ring, desc, skb));\n \n DEFINE_EVENT(\n \tiavf_rx_template, iavf_clean_rx_irq_rx,\n-\tTP_PROTO(struct i40e_ring *ring,\n-\t\t union i40e_32byte_rx_desc *desc,\n+\tTP_PROTO(struct iavf_ring *ring,\n+\t\t union iavf_32byte_rx_desc *desc,\n \t\t struct sk_buff *skb),\n \n \tTP_ARGS(ring, desc, skb));\n@@ -157,7 +157,7 @@ DECLARE_EVENT_CLASS(\n \tiavf_xmit_template,\n \n \tTP_PROTO(struct sk_buff *skb,\n-\t\t struct i40e_ring *ring),\n+\t\t struct iavf_ring *ring),\n \n \tTP_ARGS(skb, ring),\n \n@@ -182,14 +182,14 @@ DECLARE_EVENT_CLASS(\n DEFINE_EVENT(\n \tiavf_xmit_template, iavf_xmit_frame_ring,\n \tTP_PROTO(struct sk_buff *skb,\n-\t\t struct i40e_ring *ring),\n+\t\t struct iavf_ring *ring),\n \n \tTP_ARGS(skb, ring));\n \n DEFINE_EVENT(\n \tiavf_xmit_template, iavf_xmit_frame_ring_drop,\n \tTP_PROTO(struct sk_buff *skb,\n-\t\t struct i40e_ring *ring),\n+\t\t struct iavf_ring *ring),\n \n \tTP_ARGS(skb, ring));\n \ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c\nindex 5164e812f009..517c37a44026 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c\n+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c\n@@ -12,24 +12,24 @@ static inline __le64 build_ctob(u32 td_cmd, 
u32 td_offset, unsigned int size,\n \t\t\t\tu32 td_tag)\n {\n \treturn cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |\n-\t\t\t   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |\n-\t\t\t   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |\n-\t\t\t   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |\n-\t\t\t   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));\n+\t\t\t   ((u64)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |\n+\t\t\t   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |\n+\t\t\t   ((u64)size  << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |\n+\t\t\t   ((u64)td_tag  << IAVF_TXD_QW1_L2TAG1_SHIFT));\n }\n \n-#define I40E_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)\n+#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)\n \n /**\n- * i40e_unmap_and_free_tx_resource - Release a Tx buffer\n+ * iavf_unmap_and_free_tx_resource - Release a Tx buffer\n  * @ring:      the ring that owns the buffer\n  * @tx_buffer: the buffer to free\n  **/\n-static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,\n-\t\t\t\t\t    struct i40e_tx_buffer *tx_buffer)\n+static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,\n+\t\t\t\t\t    struct iavf_tx_buffer *tx_buffer)\n {\n \tif (tx_buffer->skb) {\n-\t\tif (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)\n+\t\tif (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)\n \t\t\tkfree(tx_buffer->raw_buf);\n \t\telse\n \t\t\tdev_kfree_skb_any(tx_buffer->skb);\n@@ -55,7 +55,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,\n  * iavf_clean_tx_ring - Free any empty Tx buffers\n  * @tx_ring: ring to be cleaned\n  **/\n-void iavf_clean_tx_ring(struct i40e_ring *tx_ring)\n+void iavf_clean_tx_ring(struct iavf_ring *tx_ring)\n {\n \tunsigned long bi_size;\n \tu16 i;\n@@ -66,9 +66,9 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring)\n \n \t/* Free all the Tx ring sk_buffs */\n \tfor (i = 0; i < tx_ring->count; i++)\n-\t\ti40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);\n+\t\tiavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);\n \n-\tbi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;\n+\tbi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;\n \tmemset(tx_ring->tx_bi, 0, bi_size);\n \n \t/* Zero out the descriptor ring */\n@@ -90,7 +90,7 @@ void iavf_clean_tx_ring(struct i40e_ring *tx_ring)\n  *\n  * Free all transmit software resources\n  **/\n-void iavf_free_tx_resources(struct i40e_ring *tx_ring)\n+void iavf_free_tx_resources(struct iavf_ring *tx_ring)\n {\n \tiavf_clean_tx_ring(tx_ring);\n \tkfree(tx_ring->tx_bi);\n@@ -111,7 +111,7 @@ void iavf_free_tx_resources(struct i40e_ring *tx_ring)\n  * Since there is no access to the ring head register\n  * in XL710, we need to use our local copies\n  **/\n-u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw)\n+u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)\n {\n \tu32 head, tail;\n \n@@ -132,9 +132,9 @@ u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw)\n  * VSI has netdev and netdev has TX queues. 
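\n/* A standalone model of the per-queue stall check this comment describes (sketch only: demo_txq and its fields are invented, not the driver's structures). A queue counts as hung when its completion counter has not moved since the previous check while descriptors were still pending; the driver reacts by forcing a software interrupt on that queue's vector. */\n#include <stdbool.h>\n#include <stdint.h>\n\nstruct demo_txq {\n\tuint64_t packets_cleaned;\t/* completions observed so far */\n\tint64_t prev_pkt_ctr;\t\t/* snapshot from the last check, -1 when idle */\n};\n\nstatic bool demo_queue_hung(struct demo_txq *q, bool work_pending)\n{\n\tbool hung = work_pending && q->prev_pkt_ctr == (int64_t)q->packets_cleaned;\n\n\t/* only remember the counter while work is outstanding */\n\tq->prev_pkt_ctr = work_pending ? (int64_t)q->packets_cleaned : -1;\n\treturn hung;\t/* the caller would then issue the SW interrupt */\n}\n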
This function is to check each of\n  * those TX queues if they are hung, trigger recovery by issuing SW interrupt.\n  **/\n-void iavf_detect_recover_hung(struct i40e_vsi *vsi)\n+void iavf_detect_recover_hung(struct iavf_vsi *vsi)\n {\n-\tstruct i40e_ring *tx_ring = NULL;\n+\tstruct iavf_ring *tx_ring = NULL;\n \tstruct net_device *netdev;\n \tunsigned int i;\n \tint packets;\n@@ -142,7 +142,7 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi)\n \tif (!vsi)\n \t\treturn;\n \n-\tif (test_bit(__I40E_VSI_DOWN, vsi->state))\n+\tif (test_bit(__IAVF_VSI_DOWN, vsi->state))\n \t\treturn;\n \n \tnetdev = vsi->netdev;\n@@ -181,19 +181,19 @@ void iavf_detect_recover_hung(struct i40e_vsi *vsi)\n #define WB_STRIDE 4\n \n /**\n- * i40e_clean_tx_irq - Reclaim resources after transmit completes\n+ * iavf_clean_tx_irq - Reclaim resources after transmit completes\n  * @vsi: the VSI we care about\n  * @tx_ring: Tx ring to clean\n  * @napi_budget: Used to determine if we are in netpoll\n  *\n  * Returns true if there's any budget left (e.g. the clean is finished)\n  **/\n-static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n-\t\t\t      struct i40e_ring *tx_ring, int napi_budget)\n+static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,\n+\t\t\t      struct iavf_ring *tx_ring, int napi_budget)\n {\n \tu16 i = tx_ring->next_to_clean;\n-\tstruct i40e_tx_buffer *tx_buf;\n-\tstruct i40e_tx_desc *tx_desc;\n+\tstruct iavf_tx_buffer *tx_buf;\n+\tstruct iavf_tx_desc *tx_desc;\n \tunsigned int total_bytes = 0, total_packets = 0;\n \tunsigned int budget = vsi->work_limit;\n \n@@ -202,7 +202,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n \ti -= tx_ring->count;\n \n \tdo {\n-\t\tstruct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;\n+\t\tstruct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;\n \n \t\t/* if next_to_watch is not set then there is no work pending */\n \t\tif (!eop_desc)\n@@ -286,7 +286,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n \ttx_ring->q_vector->tx.total_bytes += total_bytes;\n \ttx_ring->q_vector->tx.total_packets += total_packets;\n \n-\tif (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {\n+\tif (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {\n \t\t/* check to see if there are < 4 descriptors\n \t\t * waiting to be written back, then kick the hardware to force\n \t\t * them to be written back in case we stay in NAPI.\n@@ -296,8 +296,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n \n \t\tif (budget &&\n \t\t    ((j / WB_STRIDE) == 0) && (j > 0) &&\n-\t\t    !test_bit(__I40E_VSI_DOWN, vsi->state) &&\n-\t\t    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))\n+\t\t    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&\n+\t\t    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))\n \t\t\ttx_ring->arm_wb = true;\n \t}\n \n@@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n \n #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))\n \tif (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&\n-\t\t     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {\n+\t\t     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {\n \t\t/* Make sure that anybody stopping the queue after this\n \t\t * sees the new next_to_clean.\n \t\t */\n \t\tsmp_mb();\n \t\tif (__netif_subqueue_stopped(tx_ring->netdev,\n \t\t\t\t\t     tx_ring->queue_index) &&\n-\t\t   !test_bit(__I40E_VSI_DOWN, vsi->state)) {\n+\t\t   !test_bit(__IAVF_VSI_DOWN, vsi->state)) {\n \t\t\tnetif_wake_subqueue(tx_ring->netdev,\n \t\t\t\t\t    tx_ring->queue_index);\n \t\t\t++tx_ring->tx_stats.restart_queue;\n@@ 
-330,13 +330,13 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n  * @q_vector: the vector on which to enable writeback\n  *\n  **/\n-static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,\n-\t\t\t\t  struct i40e_q_vector *q_vector)\n+static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,\n+\t\t\t\t  struct iavf_q_vector *q_vector)\n {\n \tu16 flags = q_vector->tx.ring[0].flags;\n \tu32 val;\n \n-\tif (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))\n+\tif (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))\n \t\treturn;\n \n \tif (q_vector->arm_wb_state)\n@@ -356,7 +356,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,\n  * @q_vector: the vector  on which to force writeback\n  *\n  **/\n-void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)\n+void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)\n {\n \tu32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |\n \t\t  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */\n@@ -369,31 +369,31 @@ void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)\n \t     val);\n }\n \n-static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,\n-\t\t\t\t\tstruct i40e_ring_container *rc)\n+static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,\n+\t\t\t\t\tstruct iavf_ring_container *rc)\n {\n \treturn &q_vector->rx == rc;\n }\n \n-static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)\n+static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)\n {\n \tunsigned int divisor;\n \n \tswitch (q_vector->adapter->link_speed) {\n \tcase I40E_LINK_SPEED_40GB:\n-\t\tdivisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;\n+\t\tdivisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;\n \t\tbreak;\n \tcase I40E_LINK_SPEED_25GB:\n \tcase I40E_LINK_SPEED_20GB:\n-\t\tdivisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;\n+\t\tdivisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;\n \t\tbreak;\n \tdefault:\n \tcase I40E_LINK_SPEED_10GB:\n-\t\tdivisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;\n+\t\tdivisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;\n \t\tbreak;\n \tcase I40E_LINK_SPEED_1GB:\n \tcase I40E_LINK_SPEED_100MB:\n-\t\tdivisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;\n+\t\tdivisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;\n \t\tbreak;\n \t}\n \n@@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)\n }\n \n /**\n- * i40e_update_itr - update the dynamic ITR value based on statistics\n+ * iavf_update_itr - update the dynamic ITR value based on statistics\n  * @q_vector: structure containing interrupt and ring information\n  * @rc: structure containing ring performance data\n  *\n@@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)\n  * on testing data as well as attempting to minimize response time\n  * while increasing bulk throughput.\n  **/\n-static void i40e_update_itr(struct i40e_q_vector *q_vector,\n-\t\t\t    struct i40e_ring_container *rc)\n+static void iavf_update_itr(struct iavf_q_vector *q_vector,\n+\t\t\t    struct iavf_ring_container *rc)\n {\n \tunsigned int avg_wire_size, packets, bytes, itr;\n \tunsigned long next_update = jiffies;\n@@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t/* For Rx we want to push the delay up and default to low latency.\n \t * for Tx we want to pull the delay down and default to high latency.\n \t */\n-\titr = i40e_container_is_rx(q_vector, rc) ?\n-\t      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :\n-\t      I40E_ITR_ADAPTIVE_MAX_USECS | 
I40E_ITR_ADAPTIVE_LATENCY;\n+\titr = iavf_container_is_rx(q_vector, rc) ?\n+\t      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :\n+\t      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;\n \n \t/* If we didn't update within up to 1 - 2 jiffies we can assume\n \t * that either packets are coming in so slow there hasn't been\n@@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \tpackets = rc->total_packets;\n \tbytes = rc->total_bytes;\n \n-\tif (i40e_container_is_rx(q_vector, rc)) {\n+\tif (iavf_container_is_rx(q_vector, rc)) {\n \t\t/* If Rx there are 1 to 4 packets and bytes are less than\n \t\t * 9000 assume insufficient data to use bulk rate limiting\n \t\t * approach unless Tx is already in bulk rate limiting. We\n \t\t * are likely latency driven.\n \t\t */\n \t\tif (packets && packets < 4 && bytes < 9000 &&\n-\t\t    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {\n-\t\t\titr = I40E_ITR_ADAPTIVE_LATENCY;\n+\t\t    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {\n+\t\t\titr = IAVF_ITR_ADAPTIVE_LATENCY;\n \t\t\tgoto adjust_by_size;\n \t\t}\n \t} else if (packets < 4) {\n@@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t\t * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so\n \t\t * that the Rx can relax.\n \t\t */\n-\t\tif (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&\n-\t\t    (q_vector->rx.target_itr & I40E_ITR_MASK) ==\n-\t\t     I40E_ITR_ADAPTIVE_MAX_USECS)\n+\t\tif (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&\n+\t\t    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==\n+\t\t     IAVF_ITR_ADAPTIVE_MAX_USECS)\n \t\t\tgoto clear_counts;\n \t} else if (packets > 32) {\n \t\t/* If we have processed over 32 packets in a single interrupt\n \t\t * for Tx assume we need to switch over to \"bulk\" mode.\n \t\t */\n-\t\trc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;\n+\t\trc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;\n \t}\n \n \t/* We have no packets to actually measure against. This means\n@@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t * fixed amount.\n \t */\n \tif (packets < 56) {\n-\t\titr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;\n-\t\tif ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {\n-\t\t\titr &= I40E_ITR_ADAPTIVE_LATENCY;\n-\t\t\titr += I40E_ITR_ADAPTIVE_MAX_USECS;\n+\t\titr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;\n+\t\tif ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {\n+\t\t\titr &= IAVF_ITR_ADAPTIVE_LATENCY;\n+\t\t\titr += IAVF_ITR_ADAPTIVE_MAX_USECS;\n \t\t}\n \t\tgoto clear_counts;\n \t}\n \n \tif (packets <= 256) {\n \t\titr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);\n-\t\titr &= I40E_ITR_MASK;\n+\t\titr &= IAVF_ITR_MASK;\n \n \t\t/* Between 56 and 112 is our \"goldilocks\" zone where we are\n \t\t * working out \"just right\". 
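\n/* A toy model of the size-based fallback computed further down in this function (standalone sketch: the constants stand in for the IAVF_ITR_ADAPTIVE_* values and the link-speed divisors rather than reproducing the exact driver tables). Average wire size over the last interval is scaled into an interrupt-throttle delay, and low-latency mode halves that delay to double the interrupt rate. */\n#include <stdio.h>\n\n#define DEMO_ITR_MIN_INC 2\t\t\t/* ITR registers count in 2 usec units */\n#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))\n\nstatic unsigned int demo_itr_from_load(unsigned int packets, unsigned int bytes,\n\t\t\t\t       unsigned int divisor, int low_latency)\n{\n\tunsigned int avg_wire_size = packets ? bytes / packets : 0;\n\n\tif (low_latency)\n\t\tavg_wire_size /= 2;\n\n\treturn DIV_ROUND_UP(avg_wire_size, divisor) * DEMO_ITR_MIN_INC;\n}\n\nint main(void)\n{\n\t/* e.g. bulk 1500-byte frames with an assumed 10G-class divisor of 512 */\n\tprintf(\"itr = %u usecs\\n\", demo_itr_from_load(300, 450000, 512, 0));\n\treturn 0;\n}\n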
Just report that our current\n@@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t\t * in half per interrupt.\n \t\t */\n \t\titr /= 2;\n-\t\titr &= I40E_ITR_MASK;\n-\t\tif (itr < I40E_ITR_ADAPTIVE_MIN_USECS)\n-\t\t\titr = I40E_ITR_ADAPTIVE_MIN_USECS;\n+\t\titr &= IAVF_ITR_MASK;\n+\t\tif (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)\n+\t\t\titr = IAVF_ITR_ADAPTIVE_MIN_USECS;\n \n \t\tgoto clear_counts;\n \t}\n@@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t * though for smaller packet sizes there isn't much we can do as\n \t * NAPI polling will likely be kicking in sooner rather than later.\n \t */\n-\titr = I40E_ITR_ADAPTIVE_BULK;\n+\titr = IAVF_ITR_ADAPTIVE_BULK;\n \n adjust_by_size:\n \t/* If packet counts are 256 or greater we can assume we have a gross\n@@ -577,7 +577,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t/* If we are in low latency mode halve our delay which doubles the\n \t * rate to somewhere between 100K to 16K ints/sec\n \t */\n-\tif (itr & I40E_ITR_ADAPTIVE_LATENCY)\n+\tif (itr & IAVF_ITR_ADAPTIVE_LATENCY)\n \t\tavg_wire_size /= 2;\n \n \t/* Resultant value is 256 times larger than it needs to be. This\n@@ -587,12 +587,12 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n \t * Use addition as we have already recorded the new latency flag\n \t * for the ITR value.\n \t */\n-\titr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *\n-\t       I40E_ITR_ADAPTIVE_MIN_INC;\n+\titr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *\n+\t       IAVF_ITR_ADAPTIVE_MIN_INC;\n \n-\tif ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {\n-\t\titr &= I40E_ITR_ADAPTIVE_LATENCY;\n-\t\titr += I40E_ITR_ADAPTIVE_MAX_USECS;\n+\tif ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {\n+\t\titr &= IAVF_ITR_ADAPTIVE_LATENCY;\n+\t\titr += IAVF_ITR_ADAPTIVE_MAX_USECS;\n \t}\n \n clear_counts:\n@@ -612,7 +612,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,\n  *\n  * Return 0 on success, negative on error\n  **/\n-int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)\n+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)\n {\n \tstruct device *dev = tx_ring->dev;\n \tint bi_size;\n@@ -622,13 +622,13 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)\n \n \t/* warn if we are about to overwrite the pointer */\n \tWARN_ON(tx_ring->tx_bi);\n-\tbi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;\n+\tbi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;\n \ttx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);\n \tif (!tx_ring->tx_bi)\n \t\tgoto err;\n \n \t/* round up to nearest 4K */\n-\ttx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);\n+\ttx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);\n \ttx_ring->size = ALIGN(tx_ring->size, 4096);\n \ttx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,\n \t\t\t\t\t   &tx_ring->dma, GFP_KERNEL);\n@@ -653,7 +653,7 @@ int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)\n  * iavf_clean_rx_ring - Free Rx buffers\n  * @rx_ring: ring to be cleaned\n  **/\n-void iavf_clean_rx_ring(struct i40e_ring *rx_ring)\n+void iavf_clean_rx_ring(struct iavf_ring *rx_ring)\n {\n \tunsigned long bi_size;\n \tu16 i;\n@@ -669,7 +669,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)\n \n \t/* Free all the Rx ring sk_buffs */\n \tfor (i = 0; i < rx_ring->count; i++) {\n-\t\tstruct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];\n+\t\tstruct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];\n \n \t\tif 
(!rx_bi->page)\n \t\t\tcontinue;\n@@ -685,9 +685,9 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)\n \n \t\t/* free resources associated with mapping */\n \t\tdma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,\n-\t\t\t\t     i40e_rx_pg_size(rx_ring),\n+\t\t\t\t     iavf_rx_pg_size(rx_ring),\n \t\t\t\t     DMA_FROM_DEVICE,\n-\t\t\t\t     I40E_RX_DMA_ATTR);\n+\t\t\t\t     IAVF_RX_DMA_ATTR);\n \n \t\t__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);\n \n@@ -695,7 +695,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)\n \t\trx_bi->page_offset = 0;\n \t}\n \n-\tbi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;\n+\tbi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;\n \tmemset(rx_ring->rx_bi, 0, bi_size);\n \n \t/* Zero out the descriptor ring */\n@@ -712,7 +712,7 @@ void iavf_clean_rx_ring(struct i40e_ring *rx_ring)\n  *\n  * Free all receive software resources\n  **/\n-void iavf_free_rx_resources(struct i40e_ring *rx_ring)\n+void iavf_free_rx_resources(struct iavf_ring *rx_ring)\n {\n \tiavf_clean_rx_ring(rx_ring);\n \tkfree(rx_ring->rx_bi);\n@@ -731,14 +731,14 @@ void iavf_free_rx_resources(struct i40e_ring *rx_ring)\n  *\n  * Returns 0 on success, negative on failure\n  **/\n-int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)\n+int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)\n {\n \tstruct device *dev = rx_ring->dev;\n \tint bi_size;\n \n \t/* warn if we are about to overwrite the pointer */\n \tWARN_ON(rx_ring->rx_bi);\n-\tbi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;\n+\tbi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;\n \trx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);\n \tif (!rx_ring->rx_bi)\n \t\tgoto err;\n@@ -746,7 +746,7 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)\n \tu64_stats_init(&rx_ring->syncp);\n \n \t/* Round up to nearest 4K */\n-\trx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);\n+\trx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);\n \trx_ring->size = ALIGN(rx_ring->size, 4096);\n \trx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,\n \t\t\t\t\t   &rx_ring->dma, GFP_KERNEL);\n@@ -769,11 +769,11 @@ int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)\n }\n \n /**\n- * i40e_release_rx_desc - Store the new tail and head values\n+ * iavf_release_rx_desc - Store the new tail and head values\n  * @rx_ring: ring to bump\n  * @val: new head index\n  **/\n-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)\n+static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)\n {\n \trx_ring->next_to_use = val;\n \n@@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)\n }\n \n /**\n- * i40e_rx_offset - Return expected offset into page to access data\n+ * iavf_rx_offset - Return expected offset into page to access data\n  * @rx_ring: Ring we are requesting offset of\n  *\n  * Returns the offset value for ring into the data buffer.\n  */\n-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)\n+static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)\n {\n-\treturn ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;\n+\treturn ring_uses_build_skb(rx_ring) ? 
IAVF_SKB_PAD : 0;\n }\n \n /**\n- * i40e_alloc_mapped_page - recycle or make a new page\n+ * iavf_alloc_mapped_page - recycle or make a new page\n  * @rx_ring: ring to use\n  * @bi: rx_buffer struct to modify\n  *\n  * Returns true if the page was successfully allocated or\n  * reused.\n  **/\n-static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n-\t\t\t\t   struct i40e_rx_buffer *bi)\n+static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,\n+\t\t\t\t   struct iavf_rx_buffer *bi)\n {\n \tstruct page *page = bi->page;\n \tdma_addr_t dma;\n@@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \t}\n \n \t/* alloc new page for storage */\n-\tpage = dev_alloc_pages(i40e_rx_pg_order(rx_ring));\n+\tpage = dev_alloc_pages(iavf_rx_pg_order(rx_ring));\n \tif (unlikely(!page)) {\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n@@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \n \t/* map page for use */\n \tdma = dma_map_page_attrs(rx_ring->dev, page, 0,\n-\t\t\t\t i40e_rx_pg_size(rx_ring),\n+\t\t\t\t iavf_rx_pg_size(rx_ring),\n \t\t\t\t DMA_FROM_DEVICE,\n-\t\t\t\t I40E_RX_DMA_ATTR);\n+\t\t\t\t IAVF_RX_DMA_ATTR);\n \n \t/* if mapping failed free memory back to system since\n \t * there isn't much point in holding memory we can't use\n \t */\n \tif (dma_mapping_error(rx_ring->dev, dma)) {\n-\t\t__free_pages(page, i40e_rx_pg_order(rx_ring));\n+\t\t__free_pages(page, iavf_rx_pg_order(rx_ring));\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n \t}\n \n \tbi->dma = dma;\n \tbi->page = page;\n-\tbi->page_offset = i40e_rx_offset(rx_ring);\n+\tbi->page_offset = iavf_rx_offset(rx_ring);\n \n \t/* initialize pagecnt_bias to 1 representing we fully own page */\n \tbi->pagecnt_bias = 1;\n@@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_receive_skb - Send a completed packet up the stack\n+ * iavf_receive_skb - Send a completed packet up the stack\n  * @rx_ring:  rx ring in play\n  * @skb: packet to send up\n  * @vlan_tag: vlan tag for packet\n  **/\n-static void i40e_receive_skb(struct i40e_ring *rx_ring,\n+static void iavf_receive_skb(struct iavf_ring *rx_ring,\n \t\t\t     struct sk_buff *skb, u16 vlan_tag)\n {\n-\tstruct i40e_q_vector *q_vector = rx_ring->q_vector;\n+\tstruct iavf_q_vector *q_vector = rx_ring->q_vector;\n \n \tif ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&\n \t    (vlan_tag & VLAN_VID_MASK))\n@@ -877,11 +877,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,\n  *\n  * Returns false if all allocations were successful, true if any fail\n  **/\n-bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)\n+bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)\n {\n \tu16 ntu = rx_ring->next_to_use;\n-\tunion i40e_rx_desc *rx_desc;\n-\tstruct i40e_rx_buffer *bi;\n+\tunion iavf_rx_desc *rx_desc;\n+\tstruct iavf_rx_buffer *bi;\n \n \t/* do nothing if no valid netdev defined */\n \tif (!rx_ring->netdev || !cleaned_count)\n@@ -891,7 +891,7 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)\n \tbi = &rx_ring->rx_bi[ntu];\n \n \tdo {\n-\t\tif (!i40e_alloc_mapped_page(rx_ring, bi))\n+\t\tif (!iavf_alloc_mapped_page(rx_ring, bi))\n \t\t\tgoto no_buffers;\n \n \t\t/* sync the buffer for use by the device */\n@@ -921,13 +921,13 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)\n \t} while (cleaned_count);\n \n \tif 
(rx_ring->next_to_use != ntu)\n-\t\ti40e_release_rx_desc(rx_ring, ntu);\n+\t\tiavf_release_rx_desc(rx_ring, ntu);\n \n \treturn false;\n \n no_buffers:\n \tif (rx_ring->next_to_use != ntu)\n-\t\ti40e_release_rx_desc(rx_ring, ntu);\n+\t\tiavf_release_rx_desc(rx_ring, ntu);\n \n \t/* make sure to come back via polling to try again after\n \t * allocation failure\n@@ -936,27 +936,27 @@ bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)\n }\n \n /**\n- * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum\n+ * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum\n  * @vsi: the VSI we care about\n  * @skb: skb currently being received and modified\n  * @rx_desc: the receive descriptor\n  **/\n-static inline void i40e_rx_checksum(struct i40e_vsi *vsi,\n+static inline void iavf_rx_checksum(struct iavf_vsi *vsi,\n \t\t\t\t    struct sk_buff *skb,\n-\t\t\t\t    union i40e_rx_desc *rx_desc)\n+\t\t\t\t    union iavf_rx_desc *rx_desc)\n {\n-\tstruct i40e_rx_ptype_decoded decoded;\n+\tstruct iavf_rx_ptype_decoded decoded;\n \tu32 rx_error, rx_status;\n \tbool ipv4, ipv6;\n \tu8 ptype;\n \tu64 qword;\n \n \tqword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);\n-\tptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;\n-\trx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>\n-\t\t   I40E_RXD_QW1_ERROR_SHIFT;\n-\trx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>\n-\t\t    I40E_RXD_QW1_STATUS_SHIFT;\n+\tptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;\n+\trx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>\n+\t\t   IAVF_RXD_QW1_ERROR_SHIFT;\n+\trx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>\n+\t\t    IAVF_RXD_QW1_STATUS_SHIFT;\n \tdecoded = decode_rx_desc_ptype(ptype);\n \n \tskb->ip_summed = CHECKSUM_NONE;\n@@ -975,10 +975,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,\n \tif (!(decoded.known && decoded.outer_ip))\n \t\treturn;\n \n-\tipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&\n-\t       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);\n-\tipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&\n-\t       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);\n+\tipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&\n+\t       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);\n+\tipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&\n+\t       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);\n \n \tif (ipv4 &&\n \t    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |\n@@ -1004,9 +1004,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,\n \n \t/* Only report checksum unnecessary for TCP, UDP, or SCTP */\n \tswitch (decoded.inner_prot) {\n-\tcase I40E_RX_PTYPE_INNER_PROT_TCP:\n-\tcase I40E_RX_PTYPE_INNER_PROT_UDP:\n-\tcase I40E_RX_PTYPE_INNER_PROT_SCTP:\n+\tcase IAVF_RX_PTYPE_INNER_PROT_TCP:\n+\tcase IAVF_RX_PTYPE_INNER_PROT_UDP:\n+\tcase IAVF_RX_PTYPE_INNER_PROT_SCTP:\n \t\tskb->ip_summed = CHECKSUM_UNNECESSARY;\n \t\t/* fall though */\n \tdefault:\n@@ -1020,37 +1020,37 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,\n }\n \n /**\n- * i40e_ptype_to_htype - get a hash type\n+ * iavf_ptype_to_htype - get a hash type\n  * @ptype: the ptype value from the descriptor\n  *\n  * Returns a hash type to be used by skb_set_hash\n  **/\n-static inline int i40e_ptype_to_htype(u8 ptype)\n+static inline int iavf_ptype_to_htype(u8 ptype)\n {\n-\tstruct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);\n+\tstruct iavf_rx_ptype_decoded decoded = 
decode_rx_desc_ptype(ptype);\n \n \tif (!decoded.known)\n \t\treturn PKT_HASH_TYPE_NONE;\n \n-\tif (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&\n-\t    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)\n+\tif (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&\n+\t    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)\n \t\treturn PKT_HASH_TYPE_L4;\n-\telse if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&\n-\t\t decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)\n+\telse if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&\n+\t\t decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)\n \t\treturn PKT_HASH_TYPE_L3;\n \telse\n \t\treturn PKT_HASH_TYPE_L2;\n }\n \n /**\n- * i40e_rx_hash - set the hash value in the skb\n+ * iavf_rx_hash - set the hash value in the skb\n  * @ring: descriptor ring\n  * @rx_desc: specific descriptor\n  * @skb: skb currently being received and modified\n  * @rx_ptype: Rx packet type\n  **/\n-static inline void i40e_rx_hash(struct i40e_ring *ring,\n-\t\t\t\tunion i40e_rx_desc *rx_desc,\n+static inline void iavf_rx_hash(struct iavf_ring *ring,\n+\t\t\t\tunion iavf_rx_desc *rx_desc,\n \t\t\t\tstruct sk_buff *skb,\n \t\t\t\tu8 rx_ptype)\n {\n@@ -1064,7 +1064,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,\n \n \tif ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {\n \t\thash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);\n-\t\tskb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));\n+\t\tskb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));\n \t}\n }\n \n@@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,\n  * other fields within the skb.\n  **/\n static inline\n-void iavf_process_skb_fields(struct i40e_ring *rx_ring,\n-\t\t\t       union i40e_rx_desc *rx_desc, struct sk_buff *skb,\n+void iavf_process_skb_fields(struct iavf_ring *rx_ring,\n+\t\t\t       union iavf_rx_desc *rx_desc, struct sk_buff *skb,\n \t\t\t       u8 rx_ptype)\n {\n-\ti40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);\n+\tiavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);\n \n-\ti40e_rx_checksum(rx_ring->vsi, skb, rx_desc);\n+\tiavf_rx_checksum(rx_ring->vsi, skb, rx_desc);\n \n \tskb_record_rx_queue(skb, rx_ring->queue_index);\n \n@@ -1095,7 +1095,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_cleanup_headers - Correct empty headers\n+ * iavf_cleanup_headers - Correct empty headers\n  * @rx_ring: rx descriptor ring packet is being transacted on\n  * @skb: pointer to current skb being fixed\n  *\n@@ -1107,7 +1107,7 @@ void iavf_process_skb_fields(struct i40e_ring *rx_ring,\n  *\n  * Returns true if an error was encountered and skb was freed.\n  **/\n-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)\n+static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)\n {\n \t/* if eth_skb_pad returns an error the skb was freed */\n \tif (eth_skb_pad(skb))\n@@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)\n }\n \n /**\n- * i40e_reuse_rx_page - page flip buffer and store it back on the ring\n+ * iavf_reuse_rx_page - page flip buffer and store it back on the ring\n  * @rx_ring: rx descriptor ring to store buffers on\n  * @old_buff: donor buffer to have page reused\n  *\n  * Synchronizes page for reuse by the adapter\n  **/\n-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,\n-\t\t\t       struct i40e_rx_buffer *old_buff)\n+static void iavf_reuse_rx_page(struct iavf_ring 
*rx_ring,\n+\t\t\t       struct iavf_rx_buffer *old_buff)\n {\n-\tstruct i40e_rx_buffer *new_buff;\n+\tstruct iavf_rx_buffer *new_buff;\n \tu16 nta = rx_ring->next_to_alloc;\n \n \tnew_buff = &rx_ring->rx_bi[nta];\n@@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_page_is_reusable - check if any reuse is possible\n+ * iavf_page_is_reusable - check if any reuse is possible\n  * @page: page struct to check\n  *\n  * A page is not reusable if it was allocated under low memory\n  * conditions, or it's not in the same NUMA node as this CPU.\n  */\n-static inline bool i40e_page_is_reusable(struct page *page)\n+static inline bool iavf_page_is_reusable(struct page *page)\n {\n \treturn (page_to_nid(page) == numa_mem_id()) &&\n \t\t!page_is_pfmemalloc(page);\n }\n \n /**\n- * i40e_can_reuse_rx_page - Determine if this page can be reused by\n+ * iavf_can_reuse_rx_page - Determine if this page can be reused by\n  * the adapter for another receive\n  *\n  * @rx_buffer: buffer containing the page\n@@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  *\n  * In either case, if the page is reusable its refcount is increased.\n  **/\n-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n+static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)\n {\n \tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n \tstruct page *page = rx_buffer->page;\n \n \t/* Is any reuse possible? */\n-\tif (unlikely(!i40e_page_is_reusable(page)))\n+\tif (unlikely(!iavf_page_is_reusable(page)))\n \t\treturn false;\n \n #if (PAGE_SIZE < 8192)\n@@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n \tif (unlikely((page_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n #else\n-#define I40E_LAST_OFFSET \\\n-\t(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)\n-\tif (rx_buffer->page_offset > I40E_LAST_OFFSET)\n+#define IAVF_LAST_OFFSET \\\n+\t(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)\n+\tif (rx_buffer->page_offset > IAVF_LAST_OFFSET)\n \t\treturn false;\n #endif\n \n@@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n }\n \n /**\n- * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff\n+ * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @rx_buffer: buffer containing page to add\n  * @skb: sk_buff to place the data into\n@@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n  *\n  * The function will then update the page offset.\n  **/\n-static void i40e_add_rx_frag(struct i40e_ring *rx_ring,\n-\t\t\t     struct i40e_rx_buffer *rx_buffer,\n+static void iavf_add_rx_frag(struct iavf_ring *rx_ring,\n+\t\t\t     struct iavf_rx_buffer *rx_buffer,\n \t\t\t     struct sk_buff *skb,\n \t\t\t     unsigned int size)\n {\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n+\tunsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;\n #else\n-\tunsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));\n+\tunsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));\n #endif\n \n \tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,\n@@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use\n+ * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for 
use\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @size: size of buffer to add to skb\n  *\n  * This function will pull an Rx buffer from the ring and synchronize it\n  * for use by the CPU.\n  */\n-static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,\n+static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,\n \t\t\t\t\t\t const unsigned int size)\n {\n-\tstruct i40e_rx_buffer *rx_buffer;\n+\tstruct iavf_rx_buffer *rx_buffer;\n \n \trx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];\n \tprefetchw(rx_buffer->page);\n@@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_construct_skb - Allocate skb and populate it\n+ * iavf_construct_skb - Allocate skb and populate it\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @rx_buffer: rx buffer to pull data from\n  * @size: size of buffer to add to skb\n@@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,\n  * data from the current receive descriptor, taking care to set up the\n  * skb correctly.\n  */\n-static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,\n-\t\t\t\t\t  struct i40e_rx_buffer *rx_buffer,\n+static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,\n+\t\t\t\t\t  struct iavf_rx_buffer *rx_buffer,\n \t\t\t\t\t  unsigned int size)\n {\n \tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n+\tunsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;\n #else\n \tunsigned int truesize = SKB_DATA_ALIGN(size);\n #endif\n@@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,\n \n \t/* allocate a skb to store the frags */\n \tskb = __napi_alloc_skb(&rx_ring->q_vector->napi,\n-\t\t\t       I40E_RX_HDR_SIZE,\n+\t\t\t       IAVF_RX_HDR_SIZE,\n \t\t\t       GFP_ATOMIC | __GFP_NOWARN);\n \tif (unlikely(!skb))\n \t\treturn NULL;\n \n \t/* Determine available headroom for copy */\n \theadlen = size;\n-\tif (headlen > I40E_RX_HDR_SIZE)\n-\t\theadlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);\n+\tif (headlen > IAVF_RX_HDR_SIZE)\n+\t\theadlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);\n \n \t/* align pull length to size of long to optimize memcpy performance */\n \tmemcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));\n@@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_build_skb - Build skb around an existing buffer\n+ * iavf_build_skb - Build skb around an existing buffer\n  * @rx_ring: Rx descriptor ring to transact packets on\n  * @rx_buffer: Rx buffer to pull data from\n  * @size: size of buffer to add to skb\n@@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,\n  * This function builds an skb around an existing Rx buffer, taking care\n  * to set up the skb correctly and avoid any memcpy overhead.\n  */\n-static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,\n-\t\t\t\t      struct i40e_rx_buffer *rx_buffer,\n+static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,\n+\t\t\t\t      struct iavf_rx_buffer *rx_buffer,\n \t\t\t\t      unsigned int size)\n {\n \tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n+\tunsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;\n #else\n 
\tunsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +\n-\t\t\t\tSKB_DATA_ALIGN(I40E_SKB_PAD + size);\n+\t\t\t\tSKB_DATA_ALIGN(IAVF_SKB_PAD + size);\n #endif\n \tstruct sk_buff *skb;\n \n@@ -1370,12 +1370,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,\n \tprefetch(va + L1_CACHE_BYTES);\n #endif\n \t/* build an skb around the page buffer */\n-\tskb = build_skb(va - I40E_SKB_PAD, truesize);\n+\tskb = build_skb(va - IAVF_SKB_PAD, truesize);\n \tif (unlikely(!skb))\n \t\treturn NULL;\n \n \t/* update pointers within the skb to store the data */\n-\tskb_reserve(skb, I40E_SKB_PAD);\n+\tskb_reserve(skb, IAVF_SKB_PAD);\n \t__skb_put(skb, size);\n \n \t/* buffer is used by skb, update page_offset */\n@@ -1389,25 +1389,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_put_rx_buffer - Clean up used buffer and either recycle or free\n+ * iavf_put_rx_buffer - Clean up used buffer and either recycle or free\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @rx_buffer: rx buffer to pull data from\n  *\n  * This function will clean up the contents of the rx_buffer.  It will\n  * either recycle the buffer or unmap it and free the associated resources.\n  */\n-static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n-\t\t\t       struct i40e_rx_buffer *rx_buffer)\n+static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,\n+\t\t\t       struct iavf_rx_buffer *rx_buffer)\n {\n-\tif (i40e_can_reuse_rx_page(rx_buffer)) {\n+\tif (iavf_can_reuse_rx_page(rx_buffer)) {\n \t\t/* hand second half of page back to the ring */\n-\t\ti40e_reuse_rx_page(rx_ring, rx_buffer);\n+\t\tiavf_reuse_rx_page(rx_ring, rx_buffer);\n \t\trx_ring->rx_stats.page_reuse_count++;\n \t} else {\n \t\t/* we are not reusing the buffer so unmap it */\n \t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n-\t\t\t\t     i40e_rx_pg_size(rx_ring),\n-\t\t\t\t     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);\n+\t\t\t\t     iavf_rx_pg_size(rx_ring),\n+\t\t\t\t     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);\n \t\t__page_frag_cache_drain(rx_buffer->page,\n \t\t\t\t\trx_buffer->pagecnt_bias);\n \t}\n@@ -1417,7 +1417,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_is_non_eop - process handling of non-EOP buffers\n+ * iavf_is_non_eop - process handling of non-EOP buffers\n  * @rx_ring: Rx ring being processed\n  * @rx_desc: Rx descriptor for current buffer\n  * @skb: Current socket buffer containing buffer in progress\n@@ -1427,8 +1427,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n  * sk_buff in the next buffer to be chained and return true indicating\n  * that this is in fact a non-EOP buffer.\n  **/\n-static bool i40e_is_non_eop(struct i40e_ring *rx_ring,\n-\t\t\t    union i40e_rx_desc *rx_desc,\n+static bool iavf_is_non_eop(struct iavf_ring *rx_ring,\n+\t\t\t    union iavf_rx_desc *rx_desc,\n \t\t\t    struct sk_buff *skb)\n {\n \tu32 ntc = rx_ring->next_to_clean + 1;\n@@ -1440,8 +1440,8 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,\n \tprefetch(IAVF_RX_DESC(rx_ring, ntc));\n \n \t/* if we are the last buffer then there is nothing else to do */\n-#define I40E_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)\n-\tif (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))\n+#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)\n+\tif (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))\n \t\treturn false;\n \n \trx_ring->rx_stats.non_eop_descs++;\n@@ -1450,7 +1450,7 @@ static bool i40e_is_non_eop(struct 
i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf\n+ * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @budget: Total limit on number of packets to process\n  *\n@@ -1461,23 +1461,23 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,\n  *\n  * Returns amount of work completed\n  **/\n-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n+static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)\n {\n \tunsigned int total_rx_bytes = 0, total_rx_packets = 0;\n \tstruct sk_buff *skb = rx_ring->skb;\n-\tu16 cleaned_count = I40E_DESC_UNUSED(rx_ring);\n+\tu16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);\n \tbool failure = false;\n \n \twhile (likely(total_rx_packets < (unsigned int)budget)) {\n-\t\tstruct i40e_rx_buffer *rx_buffer;\n-\t\tunion i40e_rx_desc *rx_desc;\n+\t\tstruct iavf_rx_buffer *rx_buffer;\n+\t\tunion iavf_rx_desc *rx_desc;\n \t\tunsigned int size;\n \t\tu16 vlan_tag;\n \t\tu8 rx_ptype;\n \t\tu64 qword;\n \n \t\t/* return some buffers to hardware, one at a time is too slow */\n-\t\tif (cleaned_count >= I40E_RX_BUFFER_WRITE) {\n+\t\tif (cleaned_count >= IAVF_RX_BUFFER_WRITE) {\n \t\t\tfailure = failure ||\n \t\t\t\t  iavf_alloc_rx_buffers(rx_ring, cleaned_count);\n \t\t\tcleaned_count = 0;\n@@ -1498,21 +1498,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \t\t */\n \t\tdma_rmb();\n \n-\t\tsize = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>\n-\t\t       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;\n+\t\tsize = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>\n+\t\t       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;\n \t\tif (!size)\n \t\t\tbreak;\n \n \t\tiavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);\n-\t\trx_buffer = i40e_get_rx_buffer(rx_ring, size);\n+\t\trx_buffer = iavf_get_rx_buffer(rx_ring, size);\n \n \t\t/* retrieve a buffer from the ring */\n \t\tif (skb)\n-\t\t\ti40e_add_rx_frag(rx_ring, rx_buffer, skb, size);\n+\t\t\tiavf_add_rx_frag(rx_ring, rx_buffer, skb, size);\n \t\telse if (ring_uses_build_skb(rx_ring))\n-\t\t\tskb = i40e_build_skb(rx_ring, rx_buffer, size);\n+\t\t\tskb = iavf_build_skb(rx_ring, rx_buffer, size);\n \t\telse\n-\t\t\tskb = i40e_construct_skb(rx_ring, rx_buffer, size);\n+\t\t\tskb = iavf_construct_skb(rx_ring, rx_buffer, size);\n \n \t\t/* exit if we failed to retrieve a buffer */\n \t\tif (!skb) {\n@@ -1521,10 +1521,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \t\t\tbreak;\n \t\t}\n \n-\t\ti40e_put_rx_buffer(rx_ring, rx_buffer);\n+\t\tiavf_put_rx_buffer(rx_ring, rx_buffer);\n \t\tcleaned_count++;\n \n-\t\tif (i40e_is_non_eop(rx_ring, rx_desc, skb))\n+\t\tif (iavf_is_non_eop(rx_ring, rx_desc, skb))\n \t\t\tcontinue;\n \n \t\t/* ERR_MASK will only have valid bits if EOP set, and\n@@ -1532,13 +1532,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \t\t * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in\n \t\t * the error field\n \t\t */\n-\t\tif (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {\n+\t\tif (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {\n \t\t\tdev_kfree_skb_any(skb);\n \t\t\tskb = NULL;\n \t\t\tcontinue;\n \t\t}\n \n-\t\tif (i40e_cleanup_headers(rx_ring, skb)) {\n+\t\tif (iavf_cleanup_headers(rx_ring, skb)) {\n \t\t\tskb = NULL;\n \t\t\tcontinue;\n \t\t}\n@@ -1547,8 +1547,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n 
\t\ttotal_rx_bytes += skb->len;\n \n \t\tqword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);\n-\t\trx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>\n-\t\t\t   I40E_RXD_QW1_PTYPE_SHIFT;\n+\t\trx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>\n+\t\t\t   IAVF_RXD_QW1_PTYPE_SHIFT;\n \n \t\t/* populate checksum, VLAN, and protocol */\n \t\tiavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);\n@@ -1558,7 +1558,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \t\t\t   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;\n \n \t\tiavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);\n-\t\ti40e_receive_skb(rx_ring, skb, vlan_tag);\n+\t\tiavf_receive_skb(rx_ring, skb, vlan_tag);\n \t\tskb = NULL;\n \n \t\t/* update budget accounting */\n@@ -1578,7 +1578,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \treturn failure ? budget : (int)total_rx_packets;\n }\n \n-static inline u32 i40e_buildreg_itr(const int type, u16 itr)\n+static inline u32 iavf_buildreg_itr(const int type, u16 itr)\n {\n \tu32 val;\n \n@@ -1597,7 +1597,7 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)\n \t * only need to shift by the interval shift - 1 instead of the\n \t * full value.\n \t */\n-\titr &= I40E_ITR_MASK;\n+\titr &= IAVF_ITR_MASK;\n \n \tval = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |\n \t      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |\n@@ -1619,20 +1619,20 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)\n #define ITR_COUNTDOWN_START 3\n \n /**\n- * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt\n+ * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt\n  * @vsi: the VSI we care about\n  * @q_vector: q_vector for which itr is being updated and interrupt enabled\n  *\n  **/\n-static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,\n-\t\t\t\t\t  struct i40e_q_vector *q_vector)\n+static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,\n+\t\t\t\t\t  struct iavf_q_vector *q_vector)\n {\n \tstruct iavf_hw *hw = &vsi->back->hw;\n \tu32 intval;\n \n \t/* These will do nothing if dynamic updates are not enabled */\n-\ti40e_update_itr(q_vector, &q_vector->tx);\n-\ti40e_update_itr(q_vector, &q_vector->rx);\n+\tiavf_update_itr(q_vector, &q_vector->tx);\n+\tiavf_update_itr(q_vector, &q_vector->rx);\n \n \t/* This block of logic allows us to get away with only updating\n \t * one ITR value with each interrupt. 
The idea is to perform a\n@@ -1644,7 +1644,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,\n \t */\n \tif (q_vector->rx.target_itr < q_vector->rx.current_itr) {\n \t\t/* Rx ITR needs to be reduced, this is highest priority */\n-\t\tintval = i40e_buildreg_itr(I40E_RX_ITR,\n+\t\tintval = iavf_buildreg_itr(IAVF_RX_ITR,\n \t\t\t\t\t   q_vector->rx.target_itr);\n \t\tq_vector->rx.current_itr = q_vector->rx.target_itr;\n \t\tq_vector->itr_countdown = ITR_COUNTDOWN_START;\n@@ -1654,24 +1654,24 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,\n \t\t/* Tx ITR needs to be reduced, this is second priority\n \t\t * Tx ITR needs to be increased more than Rx, fourth priority\n \t\t */\n-\t\tintval = i40e_buildreg_itr(I40E_TX_ITR,\n+\t\tintval = iavf_buildreg_itr(IAVF_TX_ITR,\n \t\t\t\t\t   q_vector->tx.target_itr);\n \t\tq_vector->tx.current_itr = q_vector->tx.target_itr;\n \t\tq_vector->itr_countdown = ITR_COUNTDOWN_START;\n \t} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {\n \t\t/* Rx ITR needs to be increased, third priority */\n-\t\tintval = i40e_buildreg_itr(I40E_RX_ITR,\n+\t\tintval = iavf_buildreg_itr(IAVF_RX_ITR,\n \t\t\t\t\t   q_vector->rx.target_itr);\n \t\tq_vector->rx.current_itr = q_vector->rx.target_itr;\n \t\tq_vector->itr_countdown = ITR_COUNTDOWN_START;\n \t} else {\n \t\t/* No ITR update, lowest priority */\n-\t\tintval = i40e_buildreg_itr(I40E_ITR_NONE, 0);\n+\t\tintval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);\n \t\tif (q_vector->itr_countdown)\n \t\t\tq_vector->itr_countdown--;\n \t}\n \n-\tif (!test_bit(__I40E_VSI_DOWN, vsi->state))\n+\tif (!test_bit(__IAVF_VSI_DOWN, vsi->state))\n \t\twr32(hw, INTREG(q_vector->reg_idx), intval);\n }\n \n@@ -1686,16 +1686,16 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,\n  **/\n int iavf_napi_poll(struct napi_struct *napi, int budget)\n {\n-\tstruct i40e_q_vector *q_vector =\n-\t\t\t       container_of(napi, struct i40e_q_vector, napi);\n-\tstruct i40e_vsi *vsi = q_vector->vsi;\n-\tstruct i40e_ring *ring;\n+\tstruct iavf_q_vector *q_vector =\n+\t\t\t       container_of(napi, struct iavf_q_vector, napi);\n+\tstruct iavf_vsi *vsi = q_vector->vsi;\n+\tstruct iavf_ring *ring;\n \tbool clean_complete = true;\n \tbool arm_wb = false;\n \tint budget_per_ring;\n \tint work_done = 0;\n \n-\tif (test_bit(__I40E_VSI_DOWN, vsi->state)) {\n+\tif (test_bit(__IAVF_VSI_DOWN, vsi->state)) {\n \t\tnapi_complete(napi);\n \t\treturn 0;\n \t}\n@@ -1703,8 +1703,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)\n \t/* Since the actual Tx work is minimal, we can give the Tx a larger\n \t * budget and be more aggressive about cleaning up the Tx descriptors.\n \t */\n-\ti40e_for_each_ring(ring, q_vector->tx) {\n-\t\tif (!i40e_clean_tx_irq(vsi, ring, budget)) {\n+\tiavf_for_each_ring(ring, q_vector->tx) {\n+\t\tif (!iavf_clean_tx_irq(vsi, ring, budget)) {\n \t\t\tclean_complete = false;\n \t\t\tcontinue;\n \t\t}\n@@ -1721,8 +1721,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)\n \t */\n \tbudget_per_ring = max(budget/q_vector->num_ringpairs, 1);\n \n-\ti40e_for_each_ring(ring, q_vector->rx) {\n-\t\tint cleaned = i40e_clean_rx_irq(ring, budget_per_ring);\n+\tiavf_for_each_ring(ring, q_vector->rx) {\n+\t\tint cleaned = iavf_clean_rx_irq(ring, budget_per_ring);\n \n \t\twork_done += cleaned;\n \t\t/* if we clean as many as budgeted, we must not be done */\n@@ -1754,18 +1754,18 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)\n tx_only:\n \t\tif (arm_wb) {\n 
\t\t\tq_vector->tx.ring[0].tx_stats.tx_force_wb++;\n-\t\t\ti40e_enable_wb_on_itr(vsi, q_vector);\n+\t\t\tiavf_enable_wb_on_itr(vsi, q_vector);\n \t\t}\n \t\treturn budget;\n \t}\n \n-\tif (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)\n+\tif (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)\n \t\tq_vector->arm_wb_state = false;\n \n \t/* Work is done so exit the polling mode and re-enable the interrupt */\n \tnapi_complete_done(napi, work_done);\n \n-\ti40e_update_enable_itr(vsi, q_vector);\n+\tiavf_update_enable_itr(vsi, q_vector);\n \n \treturn min(work_done, budget - 1);\n }\n@@ -1783,7 +1783,7 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)\n  * otherwise  returns 0 to indicate the flags has been set properly.\n  **/\n static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,\n-\t\t\t\t\t       struct i40e_ring *tx_ring,\n+\t\t\t\t\t       struct iavf_ring *tx_ring,\n \t\t\t\t\t       u32 *flags)\n {\n \t__be16 protocol = skb->protocol;\n@@ -1804,8 +1804,8 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,\n \n \t/* if we have a HW VLAN tag being added, default to the HW one */\n \tif (skb_vlan_tag_present(skb)) {\n-\t\ttx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;\n-\t\ttx_flags |= I40E_TX_FLAGS_HW_VLAN;\n+\t\ttx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;\n+\t\ttx_flags |= IAVF_TX_FLAGS_HW_VLAN;\n \t/* else if it is a SW VLAN, check the next protocol and store the tag */\n \t} else if (protocol == htons(ETH_P_8021Q)) {\n \t\tstruct vlan_hdr *vhdr, _vhdr;\n@@ -1815,8 +1815,8 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,\n \t\t\treturn -EINVAL;\n \n \t\tprotocol = vhdr->h_vlan_encapsulated_proto;\n-\t\ttx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;\n-\t\ttx_flags |= I40E_TX_FLAGS_SW_VLAN;\n+\t\ttx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;\n+\t\ttx_flags |= IAVF_TX_FLAGS_SW_VLAN;\n \t}\n \n out:\n@@ -1825,14 +1825,14 @@ static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,\n }\n \n /**\n- * i40e_tso - set up the tso context descriptor\n+ * iavf_tso - set up the tso context descriptor\n  * @first:    pointer to first Tx buffer for xmit\n  * @hdr_len:  ptr to the size of the packet header\n  * @cd_type_cmd_tso_mss: Quad Word 1\n  *\n  * Returns 0 if no TSO can happen, 1 if tso is going, or error\n  **/\n-static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,\n+static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,\n \t\t    u64 *cd_type_cmd_tso_mss)\n {\n \tstruct sk_buff *skb = first->skb;\n@@ -1923,17 +1923,17 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,\n \tfirst->bytecount += (first->gso_segs - 1) * *hdr_len;\n \n \t/* find the field values */\n-\tcd_cmd = I40E_TX_CTX_DESC_TSO;\n+\tcd_cmd = IAVF_TX_CTX_DESC_TSO;\n \tcd_tso_len = skb->len - *hdr_len;\n \tcd_mss = gso_size;\n-\t*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |\n-\t\t\t\t(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |\n-\t\t\t\t(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);\n+\t*cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |\n+\t\t\t\t(cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |\n+\t\t\t\t(cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);\n \treturn 1;\n }\n \n /**\n- * i40e_tx_enable_csum - Enable Tx checksum offloads\n+ * iavf_tx_enable_csum - Enable Tx checksum offloads\n  * @skb: send buffer\n  * @tx_flags: pointer to Tx flags currently set\n  * @td_cmd: Tx descriptor command bits to set\n@@ -1941,9 +1941,9 @@ static int 
i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,\n  * @tx_ring: Tx descriptor ring\n  * @cd_tunneling: ptr to context desc bits\n  **/\n-static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n+static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n \t\t\t       u32 *td_cmd, u32 *td_offset,\n-\t\t\t       struct i40e_ring *tx_ring,\n+\t\t\t       struct iavf_ring *tx_ring,\n \t\t\t       u32 *cd_tunneling)\n {\n \tunion {\n@@ -1973,14 +1973,14 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n \tif (skb->encapsulation) {\n \t\tu32 tunnel = 0;\n \t\t/* define outer network header type */\n-\t\tif (*tx_flags & I40E_TX_FLAGS_IPV4) {\n-\t\t\ttunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?\n-\t\t\t\t  I40E_TX_CTX_EXT_IP_IPV4 :\n-\t\t\t\t  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;\n+\t\tif (*tx_flags & IAVF_TX_FLAGS_IPV4) {\n+\t\t\ttunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?\n+\t\t\t\t  IAVF_TX_CTX_EXT_IP_IPV4 :\n+\t\t\t\t  IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;\n \n \t\t\tl4_proto = ip.v4->protocol;\n-\t\t} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {\n-\t\t\ttunnel |= I40E_TX_CTX_EXT_IP_IPV6;\n+\t\t} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {\n+\t\t\ttunnel |= IAVF_TX_CTX_EXT_IP_IPV6;\n \n \t\t\texthdr = ip.hdr + sizeof(*ip.v6);\n \t\t\tl4_proto = ip.v6->nexthdr;\n@@ -1992,20 +1992,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n \t\t/* define outer transport */\n \t\tswitch (l4_proto) {\n \t\tcase IPPROTO_UDP:\n-\t\t\ttunnel |= I40E_TXD_CTX_UDP_TUNNELING;\n-\t\t\t*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;\n+\t\t\ttunnel |= IAVF_TXD_CTX_UDP_TUNNELING;\n+\t\t\t*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;\n \t\t\tbreak;\n \t\tcase IPPROTO_GRE:\n-\t\t\ttunnel |= I40E_TXD_CTX_GRE_TUNNELING;\n-\t\t\t*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;\n+\t\t\ttunnel |= IAVF_TXD_CTX_GRE_TUNNELING;\n+\t\t\t*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;\n \t\t\tbreak;\n \t\tcase IPPROTO_IPIP:\n \t\tcase IPPROTO_IPV6:\n-\t\t\t*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;\n+\t\t\t*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;\n \t\t\tl4.hdr = skb_inner_network_header(skb);\n \t\t\tbreak;\n \t\tdefault:\n-\t\t\tif (*tx_flags & I40E_TX_FLAGS_TSO)\n+\t\t\tif (*tx_flags & IAVF_TX_FLAGS_TSO)\n \t\t\t\treturn -1;\n \n \t\t\tskb_checksum_help(skb);\n@@ -2014,20 +2014,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n \n \t\t/* compute outer L3 header size */\n \t\ttunnel |= ((l4.hdr - ip.hdr) / 4) <<\n-\t\t\t  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;\n+\t\t\t  IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;\n \n \t\t/* switch IP header pointer from outer to inner header */\n \t\tip.hdr = skb_inner_network_header(skb);\n \n \t\t/* compute tunnel header size */\n \t\ttunnel |= ((ip.hdr - l4.hdr) / 2) <<\n-\t\t\t  I40E_TXD_CTX_QW0_NATLEN_SHIFT;\n+\t\t\t  IAVF_TXD_CTX_QW0_NATLEN_SHIFT;\n \n \t\t/* indicate if we need to offload outer UDP header */\n-\t\tif ((*tx_flags & I40E_TX_FLAGS_TSO) &&\n+\t\tif ((*tx_flags & IAVF_TX_FLAGS_TSO) &&\n \t\t    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&\n \t\t    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))\n-\t\t\ttunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;\n+\t\t\ttunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;\n \n \t\t/* record tunnel offload values */\n \t\t*cd_tunneling |= tunnel;\n@@ -2037,23 +2037,23 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n \t\tl4_proto = 0;\n \n \t\t/* reset type as we transition from outer to inner headers */\n-\t\t*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);\n+\t\t*tx_flags &= 
~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);\n \t\tif (ip.v4->version == 4)\n-\t\t\t*tx_flags |= I40E_TX_FLAGS_IPV4;\n+\t\t\t*tx_flags |= IAVF_TX_FLAGS_IPV4;\n \t\tif (ip.v6->version == 6)\n-\t\t\t*tx_flags |= I40E_TX_FLAGS_IPV6;\n+\t\t\t*tx_flags |= IAVF_TX_FLAGS_IPV6;\n \t}\n \n \t/* Enable IP checksum offloads */\n-\tif (*tx_flags & I40E_TX_FLAGS_IPV4) {\n+\tif (*tx_flags & IAVF_TX_FLAGS_IPV4) {\n \t\tl4_proto = ip.v4->protocol;\n \t\t/* the stack computes the IP header already, the only time we\n \t\t * need the hardware to recompute it is in the case of TSO.\n \t\t */\n-\t\tcmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?\n+\t\tcmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?\n \t\t       IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :\n \t\t       IAVF_TX_DESC_CMD_IIPT_IPV4;\n-\t} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {\n+\t} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {\n \t\tcmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;\n \n \t\texthdr = ip.hdr + sizeof(*ip.v6);\n@@ -2086,7 +2086,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n \t\t\t  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n \t\tbreak;\n \tdefault:\n-\t\tif (*tx_flags & I40E_TX_FLAGS_TSO)\n+\t\tif (*tx_flags & IAVF_TX_FLAGS_TSO)\n \t\t\treturn -1;\n \t\tskb_checksum_help(skb);\n \t\treturn 0;\n@@ -2099,17 +2099,17 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,\n }\n \n /**\n- * i40e_create_tx_ctx Build the Tx context descriptor\n+ * iavf_create_tx_ctx Build the Tx context descriptor\n  * @tx_ring:  ring to create the descriptor on\n  * @cd_type_cmd_tso_mss: Quad Word 1\n  * @cd_tunneling: Quad Word 0 - bits 0-31\n  * @cd_l2tag2: Quad Word 0 - bits 32-63\n  **/\n-static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,\n+static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,\n \t\t\t       const u64 cd_type_cmd_tso_mss,\n \t\t\t       const u32 cd_tunneling, const u32 cd_l2tag2)\n {\n-\tstruct i40e_tx_context_desc *context_desc;\n+\tstruct iavf_tx_context_desc *context_desc;\n \tint i = tx_ring->next_to_use;\n \n \tif ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&\n@@ -2149,13 +2149,13 @@ bool __iavf_chk_linearize(struct sk_buff *skb)\n \n \t/* no need to check if number of frags is less than 7 */\n \tnr_frags = skb_shinfo(skb)->nr_frags;\n-\tif (nr_frags < (I40E_MAX_BUFFER_TXD - 1))\n+\tif (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))\n \t\treturn false;\n \n \t/* We need to walk through the list and validate that each group\n \t * of 6 fragments totals at least gso_size.\n \t */\n-\tnr_frags -= I40E_MAX_BUFFER_TXD - 2;\n+\tnr_frags -= IAVF_MAX_BUFFER_TXD - 2;\n \tfrag = &skb_shinfo(skb)->frags[0];\n \n \t/* Initialize size to the negative value of gso_size minus 1.  
We\n@@ -2187,17 +2187,17 @@ bool __iavf_chk_linearize(struct sk_buff *skb)\n \t\t * figure out what the remainder would be in the last\n \t\t * descriptor associated with the fragment.\n \t\t */\n-\t\tif (stale_size > I40E_MAX_DATA_PER_TXD) {\n+\t\tif (stale_size > IAVF_MAX_DATA_PER_TXD) {\n \t\t\tint align_pad = -(stale->page_offset) &\n-\t\t\t\t\t(I40E_MAX_READ_REQ_SIZE - 1);\n+\t\t\t\t\t(IAVF_MAX_READ_REQ_SIZE - 1);\n \n \t\t\tsum -= align_pad;\n \t\t\tstale_size -= align_pad;\n \n \t\t\tdo {\n-\t\t\t\tsum -= I40E_MAX_DATA_PER_TXD_ALIGNED;\n-\t\t\t\tstale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;\n-\t\t\t} while (stale_size > I40E_MAX_DATA_PER_TXD);\n+\t\t\t\tsum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;\n+\t\t\t\tstale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;\n+\t\t\t} while (stale_size > IAVF_MAX_DATA_PER_TXD);\n \t\t}\n \n \t\t/* if sum is negative we failed to make sufficient progress */\n@@ -2220,14 +2220,14 @@ bool __iavf_chk_linearize(struct sk_buff *skb)\n  *\n  * Returns -EBUSY if a stop is needed, else 0\n  **/\n-int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)\n+int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)\n {\n \tnetif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);\n \t/* Memory barrier before checking head and tail */\n \tsmp_mb();\n \n \t/* Check again in a case another CPU has just made room available. */\n-\tif (likely(I40E_DESC_UNUSED(tx_ring) < size))\n+\tif (likely(IAVF_DESC_UNUSED(tx_ring) < size))\n \t\treturn -EBUSY;\n \n \t/* A reprieve! - use start_queue because it doesn't call schedule */\n@@ -2246,23 +2246,23 @@ int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)\n  * @td_cmd:   the command field in the descriptor\n  * @td_offset: offset for checksum or crc\n  **/\n-static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n-\t\t\t\t struct i40e_tx_buffer *first, u32 tx_flags,\n+static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,\n+\t\t\t\t struct iavf_tx_buffer *first, u32 tx_flags,\n \t\t\t\t const u8 hdr_len, u32 td_cmd, u32 td_offset)\n {\n \tunsigned int data_len = skb->data_len;\n \tunsigned int size = skb_headlen(skb);\n \tstruct skb_frag_struct *frag;\n-\tstruct i40e_tx_buffer *tx_bi;\n-\tstruct i40e_tx_desc *tx_desc;\n+\tstruct iavf_tx_buffer *tx_bi;\n+\tstruct iavf_tx_desc *tx_desc;\n \tu16 i = tx_ring->next_to_use;\n \tu32 td_tag = 0;\n \tdma_addr_t dma;\n \n-\tif (tx_flags & I40E_TX_FLAGS_HW_VLAN) {\n+\tif (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {\n \t\ttd_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;\n-\t\ttd_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>\n-\t\t\t I40E_TX_FLAGS_VLAN_SHIFT;\n+\t\ttd_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>\n+\t\t\t IAVF_TX_FLAGS_VLAN_SHIFT;\n \t}\n \n \tfirst->tx_flags = tx_flags;\n@@ -2273,7 +2273,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n \ttx_bi = first;\n \n \tfor (frag = &skb_shinfo(skb)->frags[0];; frag++) {\n-\t\tunsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;\n+\t\tunsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;\n \n \t\tif (dma_mapping_error(tx_ring->dev, dma))\n \t\t\tgoto dma_error;\n@@ -2283,10 +2283,10 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n \t\tdma_unmap_addr_set(tx_bi, dma, dma);\n \n \t\t/* align size to end of page */\n-\t\tmax_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);\n+\t\tmax_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);\n \t\ttx_desc->buffer_addr = cpu_to_le64(dma);\n \n-\t\twhile (unlikely(size > I40E_MAX_DATA_PER_TXD)) 
{\n+\t\twhile (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {\n \t\t\ttx_desc->cmd_type_offset_bsz =\n \t\t\t\tbuild_ctob(td_cmd, td_offset,\n \t\t\t\t\t   max_data, td_tag);\n@@ -2302,7 +2302,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n \t\t\tdma += max_data;\n \t\t\tsize -= max_data;\n \n-\t\t\tmax_data = I40E_MAX_DATA_PER_TXD_ALIGNED;\n+\t\t\tmax_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;\n \t\t\ttx_desc->buffer_addr = cpu_to_le64(dma);\n \t\t}\n \n@@ -2337,10 +2337,10 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n \n \ttx_ring->next_to_use = i;\n \n-\ti40e_maybe_stop_tx(tx_ring, DESC_NEEDED);\n+\tiavf_maybe_stop_tx(tx_ring, DESC_NEEDED);\n \n \t/* write last descriptor with RS and EOP bits */\n-\ttd_cmd |= I40E_TXD_CMD;\n+\ttd_cmd |= IAVF_TXD_CMD;\n \ttx_desc->cmd_type_offset_bsz =\n \t\t\tbuild_ctob(td_cmd, td_offset, size, td_tag);\n \n@@ -2373,7 +2373,7 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n \t/* clear dma mappings for failed tx_bi map */\n \tfor (;;) {\n \t\ttx_bi = &tx_ring->tx_bi[i];\n-\t\ti40e_unmap_and_free_tx_resource(tx_ring, tx_bi);\n+\t\tiavf_unmap_and_free_tx_resource(tx_ring, tx_bi);\n \t\tif (tx_bi == first)\n \t\t\tbreak;\n \t\tif (i == 0)\n@@ -2385,18 +2385,18 @@ static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,\n }\n \n /**\n- * i40e_xmit_frame_ring - Sends buffer on Tx ring\n+ * iavf_xmit_frame_ring - Sends buffer on Tx ring\n  * @skb:     send buffer\n  * @tx_ring: ring to send buffer on\n  *\n  * Returns NETDEV_TX_OK if sent, else an error code\n  **/\n-static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,\n-\t\t\t\t\tstruct i40e_ring *tx_ring)\n+static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,\n+\t\t\t\t\tstruct iavf_ring *tx_ring)\n {\n \tu64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;\n \tu32 cd_tunneling = 0, cd_l2tag2 = 0;\n-\tstruct i40e_tx_buffer *first;\n+\tstruct iavf_tx_buffer *first;\n \tu32 td_offset = 0;\n \tu32 tx_flags = 0;\n \t__be16 protocol;\n@@ -2409,23 +2409,23 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,\n \n \tiavf_trace(xmit_frame_ring, skb, tx_ring);\n \n-\tcount = i40e_xmit_descriptor_count(skb);\n-\tif (i40e_chk_linearize(skb, count)) {\n+\tcount = iavf_xmit_descriptor_count(skb);\n+\tif (iavf_chk_linearize(skb, count)) {\n \t\tif (__skb_linearize(skb)) {\n \t\t\tdev_kfree_skb_any(skb);\n \t\t\treturn NETDEV_TX_OK;\n \t\t}\n-\t\tcount = i40e_txd_use_count(skb->len);\n+\t\tcount = iavf_txd_use_count(skb->len);\n \t\ttx_ring->tx_stats.tx_linearize++;\n \t}\n \n-\t/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,\n-\t *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,\n+\t/* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,\n+\t *       + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,\n \t *       + 4 desc gap to avoid the cache line where head is,\n \t *       + 1 desc for context descriptor,\n \t * otherwise try next time\n \t */\n-\tif (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {\n+\tif (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {\n \t\ttx_ring->tx_stats.tx_busy++;\n \t\treturn NETDEV_TX_BUSY;\n \t}\n@@ -2445,19 +2445,19 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,\n \n \t/* setup IPv4/IPv6 offloads */\n \tif (protocol == htons(ETH_P_IP))\n-\t\ttx_flags |= I40E_TX_FLAGS_IPV4;\n+\t\ttx_flags |= IAVF_TX_FLAGS_IPV4;\n \telse if (protocol == htons(ETH_P_IPV6))\n-\t\ttx_flags |= 
I40E_TX_FLAGS_IPV6;\n+\t\ttx_flags |= IAVF_TX_FLAGS_IPV6;\n \n-\ttso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);\n+\ttso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);\n \n \tif (tso < 0)\n \t\tgoto out_drop;\n \telse if (tso)\n-\t\ttx_flags |= I40E_TX_FLAGS_TSO;\n+\t\ttx_flags |= IAVF_TX_FLAGS_TSO;\n \n \t/* Always offload the checksum, since it's in the data descriptor */\n-\ttso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,\n+\ttso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,\n \t\t\t\t  tx_ring, &cd_tunneling);\n \tif (tso < 0)\n \t\tgoto out_drop;\n@@ -2467,7 +2467,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,\n \t/* always enable CRC insertion offload */\n \ttd_cmd |= IAVF_TX_DESC_CMD_ICRC;\n \n-\ti40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,\n+\tiavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,\n \t\t\t   cd_tunneling, cd_l2tag2);\n \n \tiavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,\n@@ -2492,17 +2492,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,\n netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)\n {\n \tstruct iavf_adapter *adapter = netdev_priv(netdev);\n-\tstruct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];\n+\tstruct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];\n \n \t/* hardware can't handle really short frames, hardware padding works\n \t * beyond this point\n \t */\n-\tif (unlikely(skb->len < I40E_MIN_TX_LEN)) {\n-\t\tif (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))\n+\tif (unlikely(skb->len < IAVF_MIN_TX_LEN)) {\n+\t\tif (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))\n \t\t\treturn NETDEV_TX_OK;\n-\t\tskb->len = I40E_MIN_TX_LEN;\n-\t\tskb_set_tail_pointer(skb, I40E_MIN_TX_LEN);\n+\t\tskb->len = IAVF_MIN_TX_LEN;\n+\t\tskb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);\n \t}\n \n-\treturn i40e_xmit_frame_ring(skb, tx_ring);\n+\treturn iavf_xmit_frame_ring(skb, tx_ring);\n }\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h\nindex db2ec715f3b5..71e7d090f8db 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h\n+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h\n@@ -1,11 +1,11 @@\n /* SPDX-License-Identifier: GPL-2.0 */\n /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/\n \n-#ifndef _I40E_TXRX_H_\n-#define _I40E_TXRX_H_\n+#ifndef _IAVF_TXRX_H_\n+#define _IAVF_TXRX_H_\n \n /* Interrupt Throttling and Rate Limiting Goodies */\n-#define I40E_DEFAULT_IRQ_WORK      256\n+#define IAVF_DEFAULT_IRQ_WORK      256\n \n /* The datasheet for the X710 and XL710 indicate that the maximum value for\n  * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec\n@@ -13,80 +13,80 @@\n  * the register value which is divided by 2 lets use the actual values and\n  * avoid an excessive amount of translation.\n  */\n-#define I40E_ITR_DYNAMIC\t0x8000\t/* use top bit as a flag */\n-#define I40E_ITR_MASK\t\t0x1FFE\t/* mask for ITR register value */\n-#define I40E_MIN_ITR\t\t     2\t/* reg uses 2 usec resolution */\n-#define I40E_ITR_100K\t\t    10\t/* all values below must be even */\n-#define I40E_ITR_50K\t\t    20\n-#define I40E_ITR_20K\t\t    50\n-#define I40E_ITR_18K\t\t    60\n-#define I40E_ITR_8K\t\t   122\n-#define I40E_MAX_ITR\t\t  8160\t/* maximum value as per datasheet */\n-#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)\n-#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)\n-#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))\n-\n-#define I40E_ITR_RX_DEF\t\t(I40E_ITR_20K | I40E_ITR_DYNAMIC)\n-#define I40E_ITR_TX_DEF\t\t(I40E_ITR_20K | I40E_ITR_DYNAMIC)\n+#define IAVF_ITR_DYNAMIC\t0x8000\t/* use top bit as a flag */\n+#define IAVF_ITR_MASK\t\t0x1FFE\t/* mask for ITR register value */\n+#define IAVF_MIN_ITR\t\t     2\t/* reg uses 2 usec resolution */\n+#define IAVF_ITR_100K\t\t    10\t/* all values below must be even */\n+#define IAVF_ITR_50K\t\t    20\n+#define IAVF_ITR_20K\t\t    50\n+#define IAVF_ITR_18K\t\t    60\n+#define IAVF_ITR_8K\t\t   122\n+#define IAVF_MAX_ITR\t\t  8160\t/* maximum value as per datasheet */\n+#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)\n+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)\n+#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))\n+\n+#define IAVF_ITR_RX_DEF\t\t(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)\n+#define IAVF_ITR_TX_DEF\t\t(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)\n \n /* 0x40 is the enable bit for interrupt rate limiting, and must be set if\n  * the value of the rate limit is non-zero\n  */\n #define INTRL_ENA                  BIT(6)\n-#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */\n+#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */\n #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)\n #define INTRL_USEC_TO_REG(set) ((set) ? 
((set) >> 2) | INTRL_ENA : 0)\n-#define I40E_INTRL_8K              125     /* 8000 ints/sec */\n-#define I40E_INTRL_62K             16      /* 62500 ints/sec */\n-#define I40E_INTRL_83K             12      /* 83333 ints/sec */\n+#define IAVF_INTRL_8K              125     /* 8000 ints/sec */\n+#define IAVF_INTRL_62K             16      /* 62500 ints/sec */\n+#define IAVF_INTRL_83K             12      /* 83333 ints/sec */\n \n-#define I40E_QUEUE_END_OF_LIST 0x7FF\n+#define IAVF_QUEUE_END_OF_LIST 0x7FF\n \n /* this enum matches hardware bits and is meant to be used by DYN_CTLN\n  * registers and QINT registers or more generally anywhere in the manual\n  * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any\n  * register but instead is a special value meaning \"don't update\" ITR0/1/2.\n  */\n-enum i40e_dyn_idx_t {\n-\tI40E_IDX_ITR0 = 0,\n-\tI40E_IDX_ITR1 = 1,\n-\tI40E_IDX_ITR2 = 2,\n-\tI40E_ITR_NONE = 3\t/* ITR_NONE must not be used as an index */\n+enum iavf_dyn_idx_t {\n+\tIAVF_IDX_ITR0 = 0,\n+\tIAVF_IDX_ITR1 = 1,\n+\tIAVF_IDX_ITR2 = 2,\n+\tIAVF_ITR_NONE = 3\t/* ITR_NONE must not be used as an index */\n };\n \n /* these are indexes into ITRN registers */\n-#define I40E_RX_ITR    I40E_IDX_ITR0\n-#define I40E_TX_ITR    I40E_IDX_ITR1\n-#define I40E_PE_ITR    I40E_IDX_ITR2\n+#define IAVF_RX_ITR    IAVF_IDX_ITR0\n+#define IAVF_TX_ITR    IAVF_IDX_ITR1\n+#define IAVF_PE_ITR    IAVF_IDX_ITR2\n \n /* Supported RSS offloads */\n-#define I40E_DEFAULT_RSS_HENA ( \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))\n-\n-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \\\n-\tBIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))\n+#define IAVF_DEFAULT_RSS_HENA ( \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))\n+\n+#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \\\n+\tBIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))\n \n /* Supported Rx Buffer Sizes (a multiple of 128) 
*/\n-#define I40E_RXBUFFER_256   256\n-#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */\n-#define I40E_RXBUFFER_2048  2048\n-#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */\n-#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */\n+#define IAVF_RXBUFFER_256   256\n+#define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */\n+#define IAVF_RXBUFFER_2048  2048\n+#define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */\n+#define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */\n \n /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we\n  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,\n@@ -95,11 +95,11 @@ enum i40e_dyn_idx_t {\n  * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)\n  * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)\n  */\n-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256\n-#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))\n-#define i40e_rx_desc i40e_32byte_rx_desc\n+#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256\n+#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))\n+#define iavf_rx_desc iavf_32byte_rx_desc\n \n-#define I40E_RX_DMA_ATTR \\\n+#define IAVF_RX_DMA_ATTR \\\n \t(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)\n \n /* Attempt to maximize the headroom available for incoming frames.  We\n@@ -113,10 +113,10 @@ enum i40e_dyn_idx_t {\n  *\t receive path.\n  */\n #if (PAGE_SIZE < 8192)\n-#define I40E_2K_TOO_SMALL_WITH_PADDING \\\n-((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))\n+#define IAVF_2K_TOO_SMALL_WITH_PADDING \\\n+((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))\n \n-static inline int i40e_compute_pad(int rx_buf_len)\n+static inline int iavf_compute_pad(int rx_buf_len)\n {\n \tint page_size, pad_size;\n \n@@ -126,7 +126,7 @@ static inline int i40e_compute_pad(int rx_buf_len)\n \treturn pad_size;\n }\n \n-static inline int i40e_skb_pad(void)\n+static inline int iavf_skb_pad(void)\n {\n \tint rx_buf_len;\n \n@@ -137,25 +137,25 @@ static inline int i40e_skb_pad(void)\n \t * tailroom due to NET_IP_ALIGN possibly shifting us out of\n \t * cache-line alignment.\n \t */\n-\tif (I40E_2K_TOO_SMALL_WITH_PADDING)\n-\t\trx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);\n+\tif (IAVF_2K_TOO_SMALL_WITH_PADDING)\n+\t\trx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);\n \telse\n-\t\trx_buf_len = I40E_RXBUFFER_1536;\n+\t\trx_buf_len = IAVF_RXBUFFER_1536;\n \n \t/* if needed make room for NET_IP_ALIGN */\n \trx_buf_len -= NET_IP_ALIGN;\n \n-\treturn i40e_compute_pad(rx_buf_len);\n+\treturn iavf_compute_pad(rx_buf_len);\n }\n \n-#define I40E_SKB_PAD i40e_skb_pad()\n+#define IAVF_SKB_PAD iavf_skb_pad()\n #else\n-#define I40E_2K_TOO_SMALL_WITH_PADDING false\n-#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)\n+#define IAVF_2K_TOO_SMALL_WITH_PADDING false\n+#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)\n #endif\n \n /**\n- * i40e_test_staterr - tests bits in Rx descriptor status and error fields\n+ * iavf_test_staterr - tests bits in Rx descriptor status and error fields\n  * @rx_desc: pointer to receive descriptor (in le64 format)\n  * @stat_err_bits: value to mask\n  *\n@@ -164,7 +164,7 @@ static inline int i40e_skb_pad(void)\n  * The status_error_len doesn't need to be shifted because it begins\n  * at offset zero.\n  */\n-static inline bool i40e_test_staterr(union i40e_rx_desc 
*rx_desc,\n+static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,\n \t\t\t\t     const u64 stat_err_bits)\n {\n \treturn !!(rx_desc->wb.qword1.status_error_len &\n@@ -172,8 +172,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,\n }\n \n /* How many Rx Buffers do we bundle into one write to the hardware ? */\n-#define I40E_RX_BUFFER_WRITE\t32\t/* Must be power of 2 */\n-#define I40E_RX_INCREMENT(r, i) \\\n+#define IAVF_RX_INCREMENT(r, i) \\\n \tdo {\t\t\t\t\t\\\n \t\t(i)++;\t\t\t\t\\\n \t\tif ((i) == (r)->count)\t\t\\\n@@ -181,7 +180,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,\n \t\tr->next_to_clean = i;\t\t\\\n \t} while (0)\n \n-#define I40E_RX_NEXT_DESC(r, i, n)\t\t\\\n+#define IAVF_RX_NEXT_DESC(r, i, n)\t\t\\\n \tdo {\t\t\t\t\t\\\n \t\t(i)++;\t\t\t\t\\\n \t\tif ((i) == (r)->count)\t\t\\\n@@ -189,26 +188,26 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,\n \t\t(n) = IAVF_RX_DESC((r), (i));\t\\\n \t} while (0)\n \n-#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)\t\t\\\n+#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)\t\t\\\n \tdo {\t\t\t\t\t\t\\\n-\t\tI40E_RX_NEXT_DESC((r), (i), (n));\t\\\n+\t\tIAVF_RX_NEXT_DESC((r), (i), (n));\t\\\n \t\tprefetch((n));\t\t\t\t\\\n \t} while (0)\n \n-#define I40E_MAX_BUFFER_TXD\t8\n-#define I40E_MIN_TX_LEN\t\t17\n+#define IAVF_MAX_BUFFER_TXD\t8\n+#define IAVF_MIN_TX_LEN\t\t17\n \n /* The size limit for a transmit buffer in a descriptor is (16K - 1).\n  * In order to align with the read requests we will align the value to\n  * the nearest 4K which represents our maximum read request size.\n  */\n-#define I40E_MAX_READ_REQ_SIZE\t\t4096\n-#define I40E_MAX_DATA_PER_TXD\t\t(16 * 1024 - 1)\n-#define I40E_MAX_DATA_PER_TXD_ALIGNED \\\n-\t(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))\n+#define IAVF_MAX_READ_REQ_SIZE\t\t4096\n+#define IAVF_MAX_DATA_PER_TXD\t\t(16 * 1024 - 1)\n+#define IAVF_MAX_DATA_PER_TXD_ALIGNED \\\n+\t(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))\n \n /**\n- * i40e_txd_use_count  - estimate the number of descriptors needed for Tx\n+ * iavf_txd_use_count  - estimate the number of descriptors needed for Tx\n  * @size: transmit request size in bytes\n  *\n  * Due to hardware alignment restrictions (4K alignment), we need to\n@@ -235,31 +234,31 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,\n  * operations into:\n  *     return ((size * 85) >> 20) + 1;\n  */\n-static inline unsigned int i40e_txd_use_count(unsigned int size)\n+static inline unsigned int iavf_txd_use_count(unsigned int size)\n {\n \treturn ((size * 85) >> 20) + 1;\n }\n \n /* Tx Descriptors needed, worst case */\n #define DESC_NEEDED (MAX_SKB_FRAGS + 6)\n-#define I40E_MIN_DESC_PENDING\t4\n-\n-#define I40E_TX_FLAGS_HW_VLAN\t\tBIT(1)\n-#define I40E_TX_FLAGS_SW_VLAN\t\tBIT(2)\n-#define I40E_TX_FLAGS_TSO\t\tBIT(3)\n-#define I40E_TX_FLAGS_IPV4\t\tBIT(4)\n-#define I40E_TX_FLAGS_IPV6\t\tBIT(5)\n-#define I40E_TX_FLAGS_FCCRC\t\tBIT(6)\n-#define I40E_TX_FLAGS_FSO\t\tBIT(7)\n-#define I40E_TX_FLAGS_FD_SB\t\tBIT(9)\n-#define I40E_TX_FLAGS_VXLAN_TUNNEL\tBIT(10)\n-#define I40E_TX_FLAGS_VLAN_MASK\t\t0xffff0000\n-#define I40E_TX_FLAGS_VLAN_PRIO_MASK\t0xe0000000\n-#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT\t29\n-#define I40E_TX_FLAGS_VLAN_SHIFT\t16\n-\n-struct i40e_tx_buffer {\n-\tstruct i40e_tx_desc *next_to_watch;\n+#define IAVF_MIN_DESC_PENDING\t4\n+\n+#define IAVF_TX_FLAGS_HW_VLAN\t\tBIT(1)\n+#define IAVF_TX_FLAGS_SW_VLAN\t\tBIT(2)\n+#define IAVF_TX_FLAGS_TSO\t\tBIT(3)\n+#define 
IAVF_TX_FLAGS_IPV4\t\tBIT(4)\n+#define IAVF_TX_FLAGS_IPV6\t\tBIT(5)\n+#define IAVF_TX_FLAGS_FCCRC\t\tBIT(6)\n+#define IAVF_TX_FLAGS_FSO\t\tBIT(7)\n+#define IAVF_TX_FLAGS_FD_SB\t\tBIT(9)\n+#define IAVF_TX_FLAGS_VXLAN_TUNNEL\tBIT(10)\n+#define IAVF_TX_FLAGS_VLAN_MASK\t\t0xffff0000\n+#define IAVF_TX_FLAGS_VLAN_PRIO_MASK\t0xe0000000\n+#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT\t29\n+#define IAVF_TX_FLAGS_VLAN_SHIFT\t16\n+\n+struct iavf_tx_buffer {\n+\tstruct iavf_tx_desc *next_to_watch;\n \tunion {\n \t\tstruct sk_buff *skb;\n \t\tvoid *raw_buf;\n@@ -272,7 +271,7 @@ struct i40e_tx_buffer {\n \tu32 tx_flags;\n };\n \n-struct i40e_rx_buffer {\n+struct iavf_rx_buffer {\n \tdma_addr_t dma;\n \tstruct page *page;\n #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)\n@@ -283,12 +282,12 @@ struct i40e_rx_buffer {\n \t__u16 pagecnt_bias;\n };\n \n-struct i40e_queue_stats {\n+struct iavf_queue_stats {\n \tu64 packets;\n \tu64 bytes;\n };\n \n-struct i40e_tx_queue_stats {\n+struct iavf_tx_queue_stats {\n \tu64 restart_queue;\n \tu64 tx_busy;\n \tu64 tx_done_old;\n@@ -298,7 +297,7 @@ struct i40e_tx_queue_stats {\n \tu64 tx_lost_interrupt;\n };\n \n-struct i40e_rx_queue_stats {\n+struct iavf_rx_queue_stats {\n \tu64 non_eop_descs;\n \tu64 alloc_page_failed;\n \tu64 alloc_buff_failed;\n@@ -306,34 +305,34 @@ struct i40e_rx_queue_stats {\n \tu64 realloc_count;\n };\n \n-enum i40e_ring_state_t {\n-\t__I40E_TX_FDIR_INIT_DONE,\n-\t__I40E_TX_XPS_INIT_DONE,\n-\t__I40E_RING_STATE_NBITS /* must be last */\n+enum iavf_ring_state_t {\n+\t__IAVF_TX_FDIR_INIT_DONE,\n+\t__IAVF_TX_XPS_INIT_DONE,\n+\t__IAVF_RING_STATE_NBITS /* must be last */\n };\n \n /* some useful defines for virtchannel interface, which\n  * is the only remaining user of header split\n  */\n-#define I40E_RX_DTYPE_NO_SPLIT      0\n-#define I40E_RX_DTYPE_HEADER_SPLIT  1\n-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2\n-#define I40E_RX_SPLIT_L2      0x1\n-#define I40E_RX_SPLIT_IP      0x2\n-#define I40E_RX_SPLIT_TCP_UDP 0x4\n-#define I40E_RX_SPLIT_SCTP    0x8\n+#define IAVF_RX_DTYPE_NO_SPLIT      0\n+#define IAVF_RX_DTYPE_HEADER_SPLIT  1\n+#define IAVF_RX_DTYPE_SPLIT_ALWAYS  2\n+#define IAVF_RX_SPLIT_L2      0x1\n+#define IAVF_RX_SPLIT_IP      0x2\n+#define IAVF_RX_SPLIT_TCP_UDP 0x4\n+#define IAVF_RX_SPLIT_SCTP    0x8\n \n /* struct that defines a descriptor ring, associated with a VSI */\n-struct i40e_ring {\n-\tstruct i40e_ring *next;\t\t/* pointer to next ring in q_vector */\n+struct iavf_ring {\n+\tstruct iavf_ring *next;\t\t/* pointer to next ring in q_vector */\n \tvoid *desc;\t\t\t/* Descriptor ring memory */\n \tstruct device *dev;\t\t/* Used for DMA mapping */\n \tstruct net_device *netdev;\t/* netdev ring maps to */\n \tunion {\n-\t\tstruct i40e_tx_buffer *tx_bi;\n-\t\tstruct i40e_rx_buffer *rx_bi;\n+\t\tstruct iavf_tx_buffer *tx_bi;\n+\t\tstruct iavf_rx_buffer *rx_bi;\n \t};\n-\tDECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);\n+\tDECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);\n \tu16 queue_index;\t\t/* Queue number of ring */\n \tu8 dcb_tc;\t\t\t/* Traffic class of ring */\n \tu8 __iomem *tail;\n@@ -361,22 +360,22 @@ struct i40e_ring {\n \tu8 packet_stride;\n \n \tu16 flags;\n-#define I40E_TXR_FLAGS_WB_ON_ITR\t\tBIT(0)\n-#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED\tBIT(1)\n+#define IAVF_TXR_FLAGS_WB_ON_ITR\t\tBIT(0)\n+#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED\tBIT(1)\n \n \t/* stats structs */\n-\tstruct i40e_queue_stats\tstats;\n+\tstruct iavf_queue_stats\tstats;\n \tstruct u64_stats_sync syncp;\n \tunion {\n-\t\tstruct i40e_tx_queue_stats 
tx_stats;\n-\t\tstruct i40e_rx_queue_stats rx_stats;\n+\t\tstruct iavf_tx_queue_stats tx_stats;\n+\t\tstruct iavf_rx_queue_stats rx_stats;\n \t};\n \n \tunsigned int size;\t\t/* length of descriptor ring in bytes */\n \tdma_addr_t dma;\t\t\t/* physical address of ring */\n \n-\tstruct i40e_vsi *vsi;\t\t/* Backreference to associated VSI */\n-\tstruct i40e_q_vector *q_vector;\t/* Backreference to associated vector */\n+\tstruct iavf_vsi *vsi;\t\t/* Backreference to associated VSI */\n+\tstruct iavf_q_vector *q_vector;\t/* Backreference to associated vector */\n \n \tstruct rcu_head rcu;\t\t/* to avoid race on free */\n \tu16 next_to_alloc;\n@@ -390,30 +389,30 @@ struct i40e_ring {\n \t\t\t\t\t */\n } ____cacheline_internodealigned_in_smp;\n \n-static inline bool ring_uses_build_skb(struct i40e_ring *ring)\n+static inline bool ring_uses_build_skb(struct iavf_ring *ring)\n {\n-\treturn !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);\n+\treturn !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);\n }\n \n-static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)\n+static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)\n {\n-\tring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;\n+\tring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;\n }\n \n-static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)\n+static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)\n {\n-\tring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;\n+\tring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;\n }\n \n-#define I40E_ITR_ADAPTIVE_MIN_INC\t0x0002\n-#define I40E_ITR_ADAPTIVE_MIN_USECS\t0x0002\n-#define I40E_ITR_ADAPTIVE_MAX_USECS\t0x007e\n-#define I40E_ITR_ADAPTIVE_LATENCY\t0x8000\n-#define I40E_ITR_ADAPTIVE_BULK\t\t0x0000\n-#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))\n+#define IAVF_ITR_ADAPTIVE_MIN_INC\t0x0002\n+#define IAVF_ITR_ADAPTIVE_MIN_USECS\t0x0002\n+#define IAVF_ITR_ADAPTIVE_MAX_USECS\t0x007e\n+#define IAVF_ITR_ADAPTIVE_LATENCY\t0x8000\n+#define IAVF_ITR_ADAPTIVE_BULK\t\t0x0000\n+#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))\n \n-struct i40e_ring_container {\n-\tstruct i40e_ring *ring;\t\t/* pointer to linked list of ring(s) */\n+struct iavf_ring_container {\n+\tstruct iavf_ring *ring;\t\t/* pointer to linked list of ring(s) */\n \tunsigned long next_update;\t/* jiffies value of next update */\n \tunsigned int total_bytes;\t/* total bytes processed this int */\n \tunsigned int total_packets;\t/* total packets processed this int */\n@@ -423,10 +422,10 @@ struct i40e_ring_container {\n };\n \n /* iterator for handling rings in ring container */\n-#define i40e_for_each_ring(pos, head) \\\n+#define iavf_for_each_ring(pos, head) \\\n \tfor (pos = (head).ring; pos != NULL; pos = pos->next)\n \n-static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)\n+static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)\n {\n #if (PAGE_SIZE < 8192)\n \tif (ring->rx_buf_len > (PAGE_SIZE / 2))\n@@ -435,25 +434,25 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)\n \treturn 0;\n }\n \n-#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))\n+#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))\n \n-bool iavf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);\n+bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);\n netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);\n-void iavf_clean_tx_ring(struct i40e_ring *tx_ring);\n-void 
iavf_clean_rx_ring(struct i40e_ring *rx_ring);\n-int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring);\n-int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring);\n-void iavf_free_tx_resources(struct i40e_ring *tx_ring);\n-void iavf_free_rx_resources(struct i40e_ring *rx_ring);\n+void iavf_clean_tx_ring(struct iavf_ring *tx_ring);\n+void iavf_clean_rx_ring(struct iavf_ring *rx_ring);\n+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);\n+int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);\n+void iavf_free_tx_resources(struct iavf_ring *tx_ring);\n+void iavf_free_rx_resources(struct iavf_ring *rx_ring);\n int iavf_napi_poll(struct napi_struct *napi, int budget);\n-void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);\n-u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw);\n-void iavf_detect_recover_hung(struct i40e_vsi *vsi);\n-int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);\n+void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);\n+u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);\n+void iavf_detect_recover_hung(struct iavf_vsi *vsi);\n+int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);\n bool __iavf_chk_linearize(struct sk_buff *skb);\n \n /**\n- * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed\n+ * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed\n  * @skb:     send buffer\n  * @tx_ring: ring to send buffer on\n  *\n@@ -461,14 +460,14 @@ bool __iavf_chk_linearize(struct sk_buff *skb);\n  * there is not enough descriptors available in this ring since we need at least\n  * one descriptor.\n  **/\n-static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)\n+static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)\n {\n \tconst struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];\n \tunsigned int nr_frags = skb_shinfo(skb)->nr_frags;\n \tint count = 0, size = skb_headlen(skb);\n \n \tfor (;;) {\n-\t\tcount += i40e_txd_use_count(size);\n+\t\tcount += iavf_txd_use_count(size);\n \n \t\tif (!nr_frags--)\n \t\t\tbreak;\n@@ -480,21 +479,21 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)\n }\n \n /**\n- * i40e_maybe_stop_tx - 1st level check for Tx stop conditions\n+ * iavf_maybe_stop_tx - 1st level check for Tx stop conditions\n  * @tx_ring: the ring to be checked\n  * @size:    the size buffer we want to assure is available\n  *\n  * Returns 0 if stop is not needed\n  **/\n-static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)\n+static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)\n {\n-\tif (likely(I40E_DESC_UNUSED(tx_ring) >= size))\n+\tif (likely(IAVF_DESC_UNUSED(tx_ring) >= size))\n \t\treturn 0;\n \treturn __iavf_maybe_stop_tx(tx_ring, size);\n }\n \n /**\n- * i40e_chk_linearize - Check if there are more than 8 fragments per packet\n+ * iavf_chk_linearize - Check if there are more than 8 fragments per packet\n  * @skb:      send buffer\n  * @count:    number of buffers used\n  *\n@@ -502,23 +501,23 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)\n  * a packet on the wire and so we need to figure out the cases where we\n  * need to linearize the skb.\n  **/\n-static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)\n+static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)\n {\n \t/* Both TSO and single send will work if count is less than 8 */\n-\tif (likely(count < I40E_MAX_BUFFER_TXD))\n+\tif (likely(count < IAVF_MAX_BUFFER_TXD))\n \t\treturn false;\n \n \tif (skb_is_gso(skb))\n \t\treturn __iavf_chk_linearize(skb);\n \n \t/* we can support up to 8 data buffers for a single send */\n-\treturn count != I40E_MAX_BUFFER_TXD;\n+\treturn count != IAVF_MAX_BUFFER_TXD;\n }\n /**\n  * @ring: Tx ring to find the netdev equivalent of\n **/\n-static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)\n+static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)\n {\n \treturn netdev_get_tx_queue(ring->netdev, ring->queue_index);\n }\n-#endif /* _I40E_TXRX_H_ */\n+#endif /* _IAVF_TXRX_H_ */\ndiff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c\nindex fdf4c5ffbe6f..8622d7781fd7 100644\n--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c\n+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c\n@@ -195,7 +195,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)\n \tu16 len;\n \n \tlen =  sizeof(struct virtchnl_vf_resource) +\n-\t\tI40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);\n+\t\tIAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);\n \tevent.buf_len = len;\n \tevent.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);\n \tif (!event.msg_buf) {\n@@ -242,7 +242,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)\n \tstruct virtchnl_vsi_queue_config_info *vqci;\n \tstruct virtchnl_queue_pair_info *vqpi;\n \tint pairs = adapter->num_active_queues;\n-\tint i, len, max_frame = I40E_MAX_RXBUFFER;\n+\tint i, len, max_frame = IAVF_MAX_RXBUFFER;\n \n \tif (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {\n \t\t/* bail because we already have a command pending */\n@@ -260,7 +260,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)\n \t/* Limit maximum frame size when jumbo frames is not enabled */\n \tif (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&\n \t    (adapter->netdev->mtu <= ETH_DATA_LEN))\n-\t\tmax_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;\n+\t\tmax_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;\n \n \tvqci->vsi_id = adapter->vsi_res->vsi_id;\n \tvqci->num_queue_pairs = pairs;\n@@ -280,7 +280,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)\n \t\tvqpi->rxq.max_pkt_size = max_frame;\n \t\tvqpi->rxq.databuffer_size =\n \t\t\tALIGN(adapter->rx_rings[i].rx_buf_len,\n-\t\t\t      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));\n+\t\t\t      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));\n \t\tvqpi++;\n \t}\n \n@@ -352,7 +352,7 @@ void iavf_map_queues(struct iavf_adapter *adapter)\n \tstruct virtchnl_irq_map_info *vimi;\n \tstruct virtchnl_vector_map *vecmap;\n \tint v_idx, q_vectors, len;\n-\tstruct i40e_q_vector *q_vector;\n+\tstruct iavf_q_vector *q_vector;\n \n \tif (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {\n \t\t/* bail because we already have a command pending */\n@@ -381,8 +381,8 @@ void iavf_map_queues(struct iavf_adapter *adapter)\n \t\tvecmap->vector_id = v_idx + NONQ_VECS;\n \t\tvecmap->txq_map = q_vector->ring_mask;\n \t\tvecmap->rxq_map = q_vector->ring_mask;\n-\t\tvecmap->rxitr_idx = I40E_RX_ITR;\n-\t\tvecmap->txitr_idx = I40E_TX_ITR;\n+\t\tvecmap->rxitr_idx = IAVF_RX_ITR;\n+\t\tvecmap->txitr_idx = IAVF_TX_ITR;\n \t}\n \t/* Misc vector last - this is only for AdminQ messages */\n \tvecmap = &vimi->vecmap[v_idx];\n@@ -1322,8 +1322,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,\n \t}\n \tswitch (v_opcode) {\n \tcase VIRTCHNL_OP_GET_STATS: {\n-\t\tstruct i40e_eth_stats *stats =\n-\t\t\t(struct i40e_eth_stats *)msg;\n+\t\tstruct iavf_eth_stats *stats =\n+\t\t\t(struct iavf_eth_stats *)msg;\n \t\tnetdev->stats.rx_packets = stats->rx_unicast +\n \t\t\t\t\t   stats->rx_multicast +\n \t\t\t\t\t   stats->rx_broadcast;\n@@ -1340,7 +1340,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,\n \t\tbreak;\n \tcase VIRTCHNL_OP_GET_VF_RESOURCES: {\n \t\tu16 len = sizeof(struct virtchnl_vf_resource) +\n-\t\t\t  I40E_MAX_VF_VSI *\n+\t\t\t  IAVF_MAX_VF_VSI *\n \t\t\t  sizeof(struct virtchnl_vsi_resource);\n \t\tmemcpy(adapter->vf_res, msg, min(msglen, len));\n \t\tiavf_validate_num_queues(adapter);\n",
    "prefixes": [
        "net-next",
        "v2",
        "12/14"
    ]
}