get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/1258417/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1258417,
    "url": "http://patchwork.ozlabs.org/api/patches/1258417/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200319201718.66672-1-anthony.l.nguyen@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20200319201718.66672-1-anthony.l.nguyen@intel.com>",
    "list_archive_url": null,
    "date": "2020-03-19T20:17:18",
    "name": "[S40,v2,4/15] ice: Add support for tunnel offloads",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "e29d5ca074f6619405c9f620b42a65c1bf115034",
    "submitter": {
        "id": 68875,
        "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api",
        "name": "Tony Nguyen",
        "email": "anthony.l.nguyen@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200319201718.66672-1-anthony.l.nguyen@intel.com/mbox/",
    "series": [
        {
            "id": 165458,
            "url": "http://patchwork.ozlabs.org/api/series/165458/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=165458",
            "date": "2020-03-19T20:17:18",
            "name": null,
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/165458/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1258417/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1258417/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.133;\n\thelo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 48jysk5lcFz9sPJ\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 20 Mar 2020 07:18:49 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 1034D86DA3;\n\tThu, 19 Mar 2020 20:18:48 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id a72fNXq6dd1r; Thu, 19 Mar 2020 20:18:44 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 76F938750A;\n\tThu, 19 Mar 2020 20:18:44 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id 0EA871BF39F\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 19 Mar 2020 20:18:43 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 005C5862A0\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 19 Mar 2020 20:18:43 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 0rPiEllghG1C for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 19 Mar 2020 20:18:40 +0000 (UTC)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 2508986156\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 19 Mar 2020 20:18:40 +0000 (UTC)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; \n\t19 Mar 2020 13:18:35 -0700",
            "from unknown (HELO localhost.jf.intel.com) ([10.166.241.65])\n\tby FMSMGA003.fm.intel.com with ESMTP; 19 Mar 2020 13:18:35 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "IronPort-SDR": [
            "UnNAZfIzgDg9yhT3nN2iQdRN1xx8MtR4xSsJdbCg34rhS+8emadpM5b2eudpchZOZa6ztNQ5HA\n\tFzhRafY5I1OQ==",
            "+f5ji9v3NPyBSzK7uBcFWwu8MxK91/DCPQqQrldgl1menIy2X91sQi7OKITke2EW3Pmet05BKs\n\t1fwZIZD+0kKQ=="
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.70,572,1574150400\"; d=\"scan'208\";a=\"291725563\"",
        "From": "Tony Nguyen <anthony.l.nguyen@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Thu, 19 Mar 2020 13:17:18 -0700",
        "Message-Id": "<20200319201718.66672-1-anthony.l.nguyen@intel.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [PATCH S40 v2 4/15] ice: Add support for tunnel\n\toffloads",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "Create a boost TCAM entry for each tunnel port in order to get a tunnel\nPTYPE. Update netdev feature flags and implement the appropriate logic to\nget and set values for hardware offloads.\n\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\nSigned-off-by: Henry Tieman <henry.w.tieman@intel.com>\n---\nv2:\n- Add reference counting for same port additions\n- Removed parameter 'index' from ice_tunnel_port_in_use()\n- Change message level for exceeding max tunnel ports and tunnel not found to\ninfo\n- Redo description for ice_create_tunnel()\n- Removed unneccesary casts\n---\n drivers/net/ethernet/intel/ice/ice.h          |   4 +\n drivers/net/ethernet/intel/ice/ice_common.c   |   2 +\n .../net/ethernet/intel/ice/ice_flex_pipe.c    | 530 +++++++++++++++++-\n .../net/ethernet/intel/ice/ice_flex_pipe.h    |   5 +\n .../net/ethernet/intel/ice/ice_flex_type.h    |  33 ++\n drivers/net/ethernet/intel/ice/ice_flow.c     |  36 +-\n drivers/net/ethernet/intel/ice/ice_flow.h     |   3 +\n .../net/ethernet/intel/ice/ice_lan_tx_rx.h    |  25 +\n drivers/net/ethernet/intel/ice/ice_main.c     |  94 +++-\n .../ethernet/intel/ice/ice_protocol_type.h    |   1 +\n drivers/net/ethernet/intel/ice/ice_txrx.c     | 126 ++++-\n drivers/net/ethernet/intel/ice/ice_txrx.h     |   3 +\n drivers/net/ethernet/intel/ice/ice_txrx_lib.c |  21 +-\n drivers/net/ethernet/intel/ice/ice_type.h     |   4 +\n 14 files changed, 873 insertions(+), 14 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex ce73a6a96aac..342913d8ae6a 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -36,6 +36,10 @@\n #include <linux/avf/virtchnl.h>\n #include <net/ipv6.h>\n #include <net/xdp_sock.h>\n+#include <net/geneve.h>\n+#include <net/gre.h>\n+#include <net/udp_tunnel.h>\n+#include <net/vxlan.h>\n #include \"ice_devids.h\"\n #include \"ice_type.h\"\n #include \"ice_txrx.h\"\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex e574a70fcc99..78d772acebef 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -769,6 +769,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n \tstatus = ice_init_hw_tbls(hw);\n \tif (status)\n \t\tgoto err_unroll_fltr_mgmt_struct;\n+\tmutex_init(&hw->tnl_lock);\n \treturn 0;\n \n err_unroll_fltr_mgmt_struct:\n@@ -798,6 +799,7 @@ void ice_deinit_hw(struct ice_hw *hw)\n \tice_sched_clear_agg(hw);\n \tice_free_seg(hw);\n \tice_free_hw_tbls(hw);\n+\tmutex_destroy(&hw->tnl_lock);\n \n \tif (hw->port_info) {\n \t\tdevm_kfree(ice_hw_to_dev(hw), hw->port_info);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c\nindex 42bac3ec5526..8995a9b8a704 100644\n--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c\n+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c\n@@ -5,6 +5,15 @@\n #include \"ice_flex_pipe.h\"\n #include \"ice_flow.h\"\n \n+/* To support tunneling entries by PF, the package will append the PF number to\n+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.\n+ */\n+static const struct ice_tunnel_type_scan tnls[] = {\n+\t{ TNL_VXLAN,\t\t\"TNL_VXLAN_PF\" },\n+\t{ TNL_GENEVE,\t\t\"TNL_GENEVE_PF\" },\n+\t{ TNL_LAST,\t\t\"\" }\n+};\n+\n static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {\n 
\t/* SWITCH */\n \t{\n@@ -239,6 +248,268 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,\n \treturn state->sect;\n }\n \n+/**\n+ * ice_pkg_enum_entry\n+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)\n+ * @state: pointer to the enum state\n+ * @sect_type: section type to enumerate\n+ * @offset: pointer to variable that receives the offset in the table (optional)\n+ * @handler: function that handles access to the entries into the section type\n+ *\n+ * This function will enumerate all the entries in particular section type in\n+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;\n+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.\n+ * When the function returns a NULL pointer, then the end of the entries has\n+ * been reached.\n+ *\n+ * Since each section may have a different header and entry size, the handler\n+ * function is needed to determine the number and location entries in each\n+ * section.\n+ *\n+ * The offset parameter is optional, but should be used for sections that\n+ * contain an offset for each section table. For such cases, the section handler\n+ * function must return the appropriate offset + index to give the absolution\n+ * offset for each entry. 
For example, if the base for a section's header\n+ * indicates a base offset of 10, and the index for the entry is 2, then\n+ * section handler function should set the offset to 10 + 2 = 12.\n+ */\n+static void *\n+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,\n+\t\t   u32 sect_type, u32 *offset,\n+\t\t   void *(*handler)(u32 sect_type, void *section,\n+\t\t\t\t    u32 index, u32 *offset))\n+{\n+\tvoid *entry;\n+\n+\tif (ice_seg) {\n+\t\tif (!handler)\n+\t\t\treturn NULL;\n+\n+\t\tif (!ice_pkg_enum_section(ice_seg, state, sect_type))\n+\t\t\treturn NULL;\n+\n+\t\tstate->entry_idx = 0;\n+\t\tstate->handler = handler;\n+\t} else {\n+\t\tstate->entry_idx++;\n+\t}\n+\n+\tif (!state->handler)\n+\t\treturn NULL;\n+\n+\t/* get entry */\n+\tentry = state->handler(state->sect_type, state->sect, state->entry_idx,\n+\t\t\t       offset);\n+\tif (!entry) {\n+\t\t/* end of a section, look for another section of this type */\n+\t\tif (!ice_pkg_enum_section(NULL, state, 0))\n+\t\t\treturn NULL;\n+\n+\t\tstate->entry_idx = 0;\n+\t\tentry = state->handler(state->sect_type, state->sect,\n+\t\t\t\t       state->entry_idx, offset);\n+\t}\n+\n+\treturn entry;\n+}\n+\n+/**\n+ * ice_boost_tcam_handler\n+ * @sect_type: section type\n+ * @section: pointer to section\n+ * @index: index of the boost TCAM entry to be returned\n+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections\n+ *\n+ * This is a callback function that can be passed to ice_pkg_enum_entry.\n+ * Handles enumeration of individual boost TCAM entries.\n+ */\n+static void *\n+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)\n+{\n+\tstruct ice_boost_tcam_section *boost;\n+\n+\tif (!section)\n+\t\treturn NULL;\n+\n+\tif (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)\n+\t\treturn NULL;\n+\n+\tif (index > ICE_MAX_BST_TCAMS_IN_BUF)\n+\t\treturn NULL;\n+\n+\tif (offset)\n+\t\t*offset = 0;\n+\n+\tboost = section;\n+\tif (index >= 
le16_to_cpu(boost->count))\n+\t\treturn NULL;\n+\n+\treturn boost->tcam + index;\n+}\n+\n+/**\n+ * ice_find_boost_entry\n+ * @ice_seg: pointer to the ice segment (non-NULL)\n+ * @addr: Boost TCAM address of entry to search for\n+ * @entry: returns pointer to the entry\n+ *\n+ * Finds a particular Boost TCAM entry and returns a pointer to that entry\n+ * if it is found. The ice_seg parameter must not be NULL since the first call\n+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.\n+ */\n+static enum ice_status\n+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,\n+\t\t     struct ice_boost_tcam_entry **entry)\n+{\n+\tstruct ice_boost_tcam_entry *tcam;\n+\tstruct ice_pkg_enum state;\n+\n+\tmemset(&state, 0, sizeof(state));\n+\n+\tif (!ice_seg)\n+\t\treturn ICE_ERR_PARAM;\n+\n+\tdo {\n+\t\ttcam = ice_pkg_enum_entry(ice_seg, &state,\n+\t\t\t\t\t  ICE_SID_RXPARSER_BOOST_TCAM, NULL,\n+\t\t\t\t\t  ice_boost_tcam_handler);\n+\t\tif (tcam && le16_to_cpu(tcam->addr) == addr) {\n+\t\t\t*entry = tcam;\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\tice_seg = NULL;\n+\t} while (tcam);\n+\n+\t*entry = NULL;\n+\treturn ICE_ERR_CFG;\n+}\n+\n+/**\n+ * ice_label_enum_handler\n+ * @sect_type: section type\n+ * @section: pointer to section\n+ * @index: index of the label entry to be returned\n+ * @offset: pointer to receive absolute offset, always zero for label sections\n+ *\n+ * This is a callback function that can be passed to ice_pkg_enum_entry.\n+ * Handles enumeration of individual label entries.\n+ */\n+static void *\n+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,\n+\t\t       u32 *offset)\n+{\n+\tstruct ice_label_section *labels;\n+\n+\tif (!section)\n+\t\treturn NULL;\n+\n+\tif (index > ICE_MAX_LABELS_IN_BUF)\n+\t\treturn NULL;\n+\n+\tif (offset)\n+\t\t*offset = 0;\n+\n+\tlabels = section;\n+\tif (index >= le16_to_cpu(labels->count))\n+\t\treturn NULL;\n+\n+\treturn labels->label + index;\n+}\n+\n+/**\n+ * 
ice_enum_labels\n+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)\n+ * @type: the section type that will contain the label (0 on subsequent calls)\n+ * @state: ice_pkg_enum structure that will hold the state of the enumeration\n+ * @value: pointer to a value that will return the label's value if found\n+ *\n+ * Enumerates a list of labels in the package. The caller will call\n+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call\n+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL\n+ * the end of the list has been reached.\n+ */\n+static char *\n+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,\n+\t\tu16 *value)\n+{\n+\tstruct ice_label *label;\n+\n+\t/* Check for valid label section on first call */\n+\tif (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))\n+\t\treturn NULL;\n+\n+\tlabel = ice_pkg_enum_entry(ice_seg, state, type, NULL,\n+\t\t\t\t   ice_label_enum_handler);\n+\tif (!label)\n+\t\treturn NULL;\n+\n+\t*value = le16_to_cpu(label->value);\n+\treturn label->name;\n+}\n+\n+/**\n+ * ice_init_pkg_hints\n+ * @hw: pointer to the HW structure\n+ * @ice_seg: pointer to the segment of the package scan (non-NULL)\n+ *\n+ * This function will scan the package and save off relevant information\n+ * (hints or metadata) for driver use. 
The ice_seg parameter must not be NULL\n+ * since the first call to ice_enum_labels requires a pointer to an actual\n+ * ice_seg structure.\n+ */\n+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)\n+{\n+\tstruct ice_pkg_enum state;\n+\tchar *label_name;\n+\tu16 val;\n+\tint i;\n+\n+\tmemset(&hw->tnl, 0, sizeof(hw->tnl));\n+\tmemset(&state, 0, sizeof(state));\n+\n+\tif (!ice_seg)\n+\t\treturn;\n+\n+\tlabel_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,\n+\t\t\t\t     &val);\n+\n+\twhile (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {\n+\t\tfor (i = 0; tnls[i].type != TNL_LAST; i++) {\n+\t\t\tsize_t len = strlen(tnls[i].label_prefix);\n+\n+\t\t\t/* Look for matching label start, before continuing */\n+\t\t\tif (strncmp(label_name, tnls[i].label_prefix, len))\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* Make sure this label matches our PF. Note that the PF\n+\t\t\t * character ('0' - '7') will be located where our\n+\t\t\t * prefix string's null terminator is located.\n+\t\t\t */\n+\t\t\tif ((label_name[len] - '0') == hw->pf_id) {\n+\t\t\t\thw->tnl.tbl[hw->tnl.count].type = tnls[i].type;\n+\t\t\t\thw->tnl.tbl[hw->tnl.count].valid = false;\n+\t\t\t\thw->tnl.tbl[hw->tnl.count].in_use = false;\n+\t\t\t\thw->tnl.tbl[hw->tnl.count].marked = false;\n+\t\t\t\thw->tnl.tbl[hw->tnl.count].boost_addr = val;\n+\t\t\t\thw->tnl.tbl[hw->tnl.count].port = 0;\n+\t\t\t\thw->tnl.count++;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\tlabel_name = ice_enum_labels(NULL, 0, &state, &val);\n+\t}\n+\n+\t/* Cache the appropriate boost TCAM entry pointers */\n+\tfor (i = 0; i < hw->tnl.count; i++) {\n+\t\tice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,\n+\t\t\t\t     &hw->tnl.tbl[i].boost_entry);\n+\t\tif (hw->tnl.tbl[i].boost_entry)\n+\t\t\thw->tnl.tbl[i].valid = true;\n+\t}\n+}\n+\n /* Key creation */\n \n #define ICE_DC_KEY\t0x1\t/* don't care */\n@@ -1050,7 +1321,8 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)\n 
\t\treturn ICE_ERR_CFG;\n \t}\n \n-\t/* download package */\n+\t/* initialize package hints and then download package */\n+\tice_init_pkg_hints(hw, seg);\n \tstatus = ice_download_pkg(hw, seg);\n \tif (status == ICE_ERR_AQ_NO_WORK) {\n \t\tice_debug(hw, ICE_DBG_INIT,\n@@ -1292,6 +1564,262 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)\n \treturn &bld->buf;\n }\n \n+/**\n+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage\n+ * @hw: pointer to the HW structure\n+ * @port: port to search for\n+ * @index: optionally returns index\n+ *\n+ * Returns whether a port is already in use as a tunnel, and optionally its\n+ * index\n+ */\n+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)\n+{\n+\tu16 i;\n+\n+\tfor (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)\n+\t\tif (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {\n+\t\t\tif (index)\n+\t\t\t\t*index = i;\n+\t\t\treturn true;\n+\t\t}\n+\n+\treturn false;\n+}\n+\n+/**\n+ * ice_tunnel_port_in_use\n+ * @hw: pointer to the HW structure\n+ * @port: port to search for\n+ * @index: optionally returns index\n+ *\n+ * Returns whether a port is already in use as a tunnel, and optionally its\n+ * index\n+ */\n+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)\n+{\n+\tbool res;\n+\n+\tmutex_lock(&hw->tnl_lock);\n+\tres = ice_tunnel_port_in_use_hlpr(hw, port, index);\n+\tmutex_unlock(&hw->tnl_lock);\n+\n+\treturn res;\n+}\n+\n+/**\n+ * ice_find_free_tunnel_entry\n+ * @hw: pointer to the HW structure\n+ * @type: tunnel type\n+ * @index: optionally returns index\n+ *\n+ * Returns whether there is a free tunnel entry, and optionally its index\n+ */\n+static bool\n+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,\n+\t\t\t   u16 *index)\n+{\n+\tbool res = false;\n+\tu16 i;\n+\n+\tmutex_lock(&hw->tnl_lock);\n+\n+\tfor (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)\n+\t\tif (hw->tnl.tbl[i].valid 
&& !hw->tnl.tbl[i].in_use &&\n+\t\t    hw->tnl.tbl[i].type == type) {\n+\t\t\tif (index)\n+\t\t\t\t*index = i;\n+\t\t\tres = true;\n+\t\t\tbreak;\n+\t\t}\n+\n+\tmutex_unlock(&hw->tnl_lock);\n+\n+\treturn res;\n+}\n+\n+/**\n+ * ice_create_tunnel\n+ * @hw: pointer to the HW structure\n+ * @type: type of tunnel\n+ * @port: port of tunnel to create\n+ *\n+ * Create a tunnel by updating the parse graph in the parser. We do that by\n+ * creating a package buffer with the tunnel info and issuing an update package\n+ * command.\n+ */\n+enum ice_status\n+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)\n+{\n+\tstruct ice_boost_tcam_section *sect_rx, *sect_tx;\n+\tenum ice_status status = ICE_ERR_MAX_LIMIT;\n+\tstruct ice_buf_build *bld;\n+\tu16 index;\n+\n+\tmutex_lock(&hw->tnl_lock);\n+\n+\tif (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {\n+\t\thw->tnl.tbl[index].ref++;\n+\t\tstatus = 0;\n+\t\tgoto ice_create_tunnel_end;\n+\t}\n+\n+\tif (!ice_find_free_tunnel_entry(hw, type, &index)) {\n+\t\tstatus = ICE_ERR_OUT_OF_RANGE;\n+\t\tgoto ice_create_tunnel_end;\n+\t}\n+\n+\tbld = ice_pkg_buf_alloc(hw);\n+\tif (!bld) {\n+\t\tstatus = ICE_ERR_NO_MEMORY;\n+\t\tgoto ice_create_tunnel_end;\n+\t}\n+\n+\t/* allocate 2 sections, one for Rx parser, one for Tx parser */\n+\tif (ice_pkg_buf_reserve_section(bld, 2))\n+\t\tgoto ice_create_tunnel_err;\n+\n+\tsect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,\n+\t\t\t\t\t    sizeof(*sect_rx));\n+\tif (!sect_rx)\n+\t\tgoto ice_create_tunnel_err;\n+\tsect_rx->count = cpu_to_le16(1);\n+\n+\tsect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,\n+\t\t\t\t\t    sizeof(*sect_tx));\n+\tif (!sect_tx)\n+\t\tgoto ice_create_tunnel_err;\n+\tsect_tx->count = cpu_to_le16(1);\n+\n+\t/* copy original boost entry to update package buffer */\n+\tmemcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,\n+\t       sizeof(*sect_rx->tcam));\n+\n+\t/* over-write the never-match dest port key bits with the 
encoded port\n+\t * bits\n+\t */\n+\tice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),\n+\t\t    (u8 *)&port, NULL, NULL, NULL,\n+\t\t    offsetof(struct ice_boost_key_value, hv_dst_port_key),\n+\t\t    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));\n+\n+\t/* exact copy of entry to Tx section entry */\n+\tmemcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));\n+\n+\tstatus = ice_update_pkg(hw, ice_pkg_buf(bld), 1);\n+\tif (!status) {\n+\t\thw->tnl.tbl[index].port = port;\n+\t\thw->tnl.tbl[index].in_use = true;\n+\t\thw->tnl.tbl[index].ref = 1;\n+\t}\n+\n+ice_create_tunnel_err:\n+\tice_pkg_buf_free(hw, bld);\n+\n+ice_create_tunnel_end:\n+\tmutex_unlock(&hw->tnl_lock);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * ice_destroy_tunnel\n+ * @hw: pointer to the HW structure\n+ * @port: port of tunnel to destroy (ignored if the all parameter is true)\n+ * @all: flag that states to destroy all tunnels\n+ *\n+ * Destroys a tunnel or all tunnels by creating an update package buffer\n+ * targeting the specific updates requested and then performing an update\n+ * package.\n+ */\n+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)\n+{\n+\tstruct ice_boost_tcam_section *sect_rx, *sect_tx;\n+\tenum ice_status status = ICE_ERR_MAX_LIMIT;\n+\tstruct ice_buf_build *bld;\n+\tu16 count = 0;\n+\tu16 index;\n+\tu16 size;\n+\tu16 i;\n+\n+\tmutex_lock(&hw->tnl_lock);\n+\n+\tif (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))\n+\t\tif (hw->tnl.tbl[index].ref > 1) {\n+\t\t\thw->tnl.tbl[index].ref--;\n+\t\t\tstatus = 0;\n+\t\t\tgoto ice_destroy_tunnel_end;\n+\t\t}\n+\n+\t/* determine count */\n+\tfor (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)\n+\t\tif (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&\n+\t\t    (all || hw->tnl.tbl[i].port == port))\n+\t\t\tcount++;\n+\n+\tif (!count) {\n+\t\tstatus = ICE_ERR_PARAM;\n+\t\tgoto ice_destroy_tunnel_end;\n+\t}\n+\n+\t/* size of section - there is at least one entry 
*/\n+\tsize = struct_size(sect_rx, tcam, count - 1);\n+\n+\tbld = ice_pkg_buf_alloc(hw);\n+\tif (!bld) {\n+\t\tstatus = ICE_ERR_NO_MEMORY;\n+\t\tgoto ice_destroy_tunnel_end;\n+\t}\n+\n+\t/* allocate 2 sections, one for Rx parser, one for Tx parser */\n+\tif (ice_pkg_buf_reserve_section(bld, 2))\n+\t\tgoto ice_destroy_tunnel_err;\n+\n+\tsect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,\n+\t\t\t\t\t    size);\n+\tif (!sect_rx)\n+\t\tgoto ice_destroy_tunnel_err;\n+\tsect_rx->count = cpu_to_le16(1);\n+\n+\tsect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,\n+\t\t\t\t\t    size);\n+\tif (!sect_tx)\n+\t\tgoto ice_destroy_tunnel_err;\n+\tsect_tx->count = cpu_to_le16(1);\n+\n+\t/* copy original boost entry to update package buffer, one copy to Rx\n+\t * section, another copy to the Tx section\n+\t */\n+\tfor (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)\n+\t\tif (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&\n+\t\t    (all || hw->tnl.tbl[i].port == port)) {\n+\t\t\tmemcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,\n+\t\t\t       sizeof(*sect_rx->tcam));\n+\t\t\tmemcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,\n+\t\t\t       sizeof(*sect_tx->tcam));\n+\t\t\thw->tnl.tbl[i].marked = true;\n+\t\t}\n+\n+\tstatus = ice_update_pkg(hw, ice_pkg_buf(bld), 1);\n+\tif (!status)\n+\t\tfor (i = 0; i < hw->tnl.count &&\n+\t\t     i < ICE_TUNNEL_MAX_ENTRIES; i++)\n+\t\t\tif (hw->tnl.tbl[i].marked) {\n+\t\t\t\thw->tnl.tbl[i].ref = 0;\n+\t\t\t\thw->tnl.tbl[i].port = 0;\n+\t\t\t\thw->tnl.tbl[i].in_use = false;\n+\t\t\t\thw->tnl.tbl[i].marked = false;\n+\t\t\t}\n+\n+ice_destroy_tunnel_err:\n+\tice_pkg_buf_free(hw, bld);\n+\n+ice_destroy_tunnel_end:\n+\tmutex_unlock(&hw->tnl_lock);\n+\n+\treturn status;\n+}\n+\n /* PTG Management */\n \n /**\ndiff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h\nindex c7b5e1a6ea2b..70db213c9fe3 100644\n--- 
a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h\n+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h\n@@ -18,6 +18,11 @@\n \n #define ICE_PKG_CNT 4\n \n+enum ice_status\n+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);\n+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);\n+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);\n+\n enum ice_status\n ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],\n \t     struct ice_fv_word *es);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h\nindex 0fb3fe3ff3ea..249fb66fc230 100644\n--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h\n@@ -149,6 +149,7 @@ struct ice_buf_hdr {\n #define ICE_SID_CDID_REDIR_RSS\t\t48\n \n #define ICE_SID_RXPARSER_BOOST_TCAM\t56\n+#define ICE_SID_TXPARSER_BOOST_TCAM\t66\n \n #define ICE_SID_XLT0_PE\t\t\t80\n #define ICE_SID_XLT_KEY_BUILDER_PE\t81\n@@ -291,6 +292,38 @@ struct ice_pkg_enum {\n \tvoid *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);\n };\n \n+/* Tunnel enabling */\n+\n+enum ice_tunnel_type {\n+\tTNL_VXLAN = 0,\n+\tTNL_GENEVE,\n+\tTNL_LAST = 0xFF,\n+\tTNL_ALL = 0xFF,\n+};\n+\n+struct ice_tunnel_type_scan {\n+\tenum ice_tunnel_type type;\n+\tconst char *label_prefix;\n+};\n+\n+struct ice_tunnel_entry {\n+\tenum ice_tunnel_type type;\n+\tu16 boost_addr;\n+\tu16 port;\n+\tu16 ref;\n+\tstruct ice_boost_tcam_entry *boost_entry;\n+\tu8 valid;\n+\tu8 in_use;\n+\tu8 marked;\n+};\n+\n+#define ICE_TUNNEL_MAX_ENTRIES\t16\n+\n+struct ice_tunnel_table {\n+\tstruct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];\n+\tu16 count;\n+};\n+\n struct ice_pkg_es {\n \t__le16 count;\n \t__le16 offset;\ndiff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c\nindex 3de862a3c789..07875db08c3f 100644\n--- 
a/drivers/net/ethernet/intel/ice/ice_flow.c\n+++ b/drivers/net/ethernet/intel/ice/ice_flow.c\n@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),\n \t/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */\n \tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),\n-\n+\t/* GRE */\n+\t/* ICE_FLOW_FIELD_IDX_GRE_KEYID */\n+\tICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,\n+\t\t\t  sizeof_field(struct gre_full_hdr, key)),\n };\n \n /* Bitmaps indicating relevant packet types for a particular protocol header\n@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {\n \t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n };\n \n+/* Packet types for packets with an Outermost/First GRE header */\n+static const u32 ice_ptypes_gre_of[] = {\n+\t0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,\n+\t0x0000017E, 0x00000000, 0x00000000, 0x00000000,\n+\t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n+\t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n+\t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n+\t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n+\t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n+\t0x00000000, 0x00000000, 0x00000000, 0x00000000,\n+};\n+\n /* Manage parameters and info. 
used during the creation of a flow profile */\n struct ice_flow_prof_params {\n \tenum ice_block blk;\n@@ -225,6 +240,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)\n \t\t\tsrc = (const unsigned long *)ice_ptypes_sctp_il;\n \t\t\tbitmap_and(params->ptypes, params->ptypes, src,\n \t\t\t\t   ICE_FLOW_PTYPE_MAX);\n+\t\t} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {\n+\t\t\tif (!i) {\n+\t\t\t\tsrc = (const unsigned long *)ice_ptypes_gre_of;\n+\t\t\t\tbitmap_and(params->ptypes, params->ptypes,\n+\t\t\t\t\t   src, ICE_FLOW_PTYPE_MAX);\n+\t\t\t}\n \t\t}\n \t}\n \n@@ -275,6 +296,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,\n \tcase ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:\n \t\tprot_id = ICE_PROT_SCTP_IL;\n \t\tbreak;\n+\tcase ICE_FLOW_FIELD_IDX_GRE_KEYID:\n+\t\tprot_id = ICE_PROT_GRE_OF;\n+\t\tbreak;\n \tdefault:\n \t\treturn ICE_ERR_NOT_IMPL;\n \t}\n@@ -945,6 +969,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)\n #define ICE_FLOW_PROF_ENCAP_M\t(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))\n \n #define ICE_RSS_OUTER_HEADERS\t1\n+#define ICE_RSS_INNER_HEADERS\t2\n \n /* Flow profile ID format:\n  * [0:31] - Packet match fields\n@@ -1085,6 +1110,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,\n \tmutex_lock(&hw->rss_locks);\n \tstatus = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,\n \t\t\t\t      ICE_RSS_OUTER_HEADERS);\n+\tif (!status)\n+\t\tstatus = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,\n+\t\t\t\t\t      addl_hdrs, ICE_RSS_INNER_HEADERS);\n \tmutex_unlock(&hw->rss_locks);\n \n \treturn status;\n@@ -1238,6 +1266,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)\n \t\t\t\t\t\t      ICE_RSS_OUTER_HEADERS);\n \t\t\tif (status)\n \t\t\t\tbreak;\n+\t\t\tstatus = ice_add_rss_cfg_sync(hw, vsi_handle,\n+\t\t\t\t\t\t      r->hashed_flds,\n+\t\t\t\t\t\t      r->packet_hdr,\n+\t\t\t\t\t\t      ICE_RSS_INNER_HEADERS);\n+\t\t\tif 
(status)\n+\t\t\t\tbreak;\n \t\t}\n \t}\n \tmutex_unlock(&hw->rss_locks);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h\nindex 5558627bd5eb..00f2b7a9feed 100644\n--- a/drivers/net/ethernet/intel/ice/ice_flow.h\n+++ b/drivers/net/ethernet/intel/ice/ice_flow.h\n@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {\n \tICE_FLOW_SEG_HDR_TCP\t\t= 0x00000040,\n \tICE_FLOW_SEG_HDR_UDP\t\t= 0x00000080,\n \tICE_FLOW_SEG_HDR_SCTP\t\t= 0x00000100,\n+\tICE_FLOW_SEG_HDR_GRE\t\t= 0x00000200,\n };\n \n enum ice_flow_field {\n@@ -58,6 +59,8 @@ enum ice_flow_field {\n \tICE_FLOW_FIELD_IDX_UDP_DST_PORT,\n \tICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,\n \tICE_FLOW_FIELD_IDX_SCTP_DST_PORT,\n+\t/* GRE */\n+\tICE_FLOW_FIELD_IDX_GRE_KEYID,\n \t/* The total number of enums must not exceed 64 */\n \tICE_FLOW_FIELD_IDX_MAX\n };\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\nindex 878e125d8b42..5d61acdec7ed 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\n@@ -262,6 +262,12 @@ enum ice_rx_flex_desc_status_error_0_bits {\n \tICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */\n };\n \n+enum ice_rx_flex_desc_status_error_1_bits {\n+\t/* Note: These are predefined bit offsets */\n+\tICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,\n+\tICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/\n+};\n+\n #define ICE_RXQ_CTX_SIZE_DWORDS\t\t8\n #define ICE_RXQ_CTX_SZ\t\t\t(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))\n #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS\t22\n@@ -413,6 +419,25 @@ enum ice_tx_ctx_desc_cmd_bits {\n \tICE_TX_CTX_DESC_RESERVED\t= 0x40\n };\n \n+enum ice_tx_ctx_desc_eipt_offload {\n+\tICE_TX_CTX_EIPT_NONE\t\t= 0x0,\n+\tICE_TX_CTX_EIPT_IPV6\t\t= 0x1,\n+\tICE_TX_CTX_EIPT_IPV4_NO_CSUM\t= 0x2,\n+\tICE_TX_CTX_EIPT_IPV4\t\t= 0x3\n+};\n+\n+#define ICE_TXD_CTX_QW0_EIPLEN_S\t2\n+\n+#define ICE_TXD_CTX_QW0_L4TUNT_S\t9\n+\n+#define ICE_TXD_CTX_UDP_TUNNELING\tBIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)\n+#define ICE_TXD_CTX_GRE_TUNNELING\t(0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)\n+\n+#define ICE_TXD_CTX_QW0_NATLEN_S\t12\n+\n+#define ICE_TXD_CTX_QW0_L4T_CS_S\t23\n+#define ICE_TXD_CTX_QW0_L4T_CS_M\tBIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)\n+\n #define ICE_LAN_TXQ_MAX_QGRPS\t127\n #define ICE_LAN_TXQ_MAX_QDIS\t1023\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex 89c090d32bb2..92726a5ae040 100644\n--- a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -2341,13 +2341,27 @@ static void ice_set_netdev_features(struct net_device *netdev)\n \t\t\t NETIF_F_HW_VLAN_CTAG_TX     |\n \t\t\t NETIF_F_HW_VLAN_CTAG_RX;\n \n-\ttso_features = NETIF_F_TSO\t\t|\n+\ttso_features = NETIF_F_TSO\t\t\t|\n+\t\t       NETIF_F_TSO_ECN\t\t\t|\n+\t\t       NETIF_F_TSO6\t\t\t|\n+\t\t       NETIF_F_GSO_GRE\t\t\t|\n+\t\t       NETIF_F_GSO_UDP_TUNNEL\t\t|\n+\t\t       NETIF_F_GSO_GRE_CSUM\t\t|\n+\t\t       NETIF_F_GSO_UDP_TUNNEL_CSUM\t|\n+\t\t       NETIF_F_GSO_PARTIAL\t\t|\n+\t\t       NETIF_F_GSO_IPXIP4\t\t|\n+\t\t       NETIF_F_GSO_IPXIP6\t\t|\n \t\t       NETIF_F_GSO_UDP_L4;\n \n+\tnetdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |\n+\t\t\t\t\tNETIF_F_GSO_GRE_CSUM;\n \t/* set features that user can change */\n \tnetdev->hw_features = dflt_features | csumo_features |\n \t\t\t      
vlano_features | tso_features;\n \n+\t/* add support for HW_CSUM on packets with MPLS header */\n+\tnetdev->mpls_features =  NETIF_F_HW_CSUM;\n+\n \t/* enable features */\n \tnetdev->features |= netdev->hw_features;\n \t/* encap and VLAN devices inherit default, csumo and tso features */\n@@ -5128,6 +5142,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)\n \tpf->tx_timeout_recovery_level++;\n }\n \n+/**\n+ * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up\n+ * @netdev: This physical port's netdev\n+ * @ti: Tunnel endpoint information\n+ */\n+static void\n+ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\tenum ice_tunnel_type tnl_type;\n+\tu16 port = ntohs(ti->port);\n+\tenum ice_status status;\n+\n+\tswitch (ti->type) {\n+\tcase UDP_TUNNEL_TYPE_VXLAN:\n+\t\ttnl_type = TNL_VXLAN;\n+\t\tbreak;\n+\tcase UDP_TUNNEL_TYPE_GENEVE:\n+\t\ttnl_type = TNL_GENEVE;\n+\t\tbreak;\n+\tdefault:\n+\t\tnetdev_err(netdev, \"Unknown tunnel type\\n\");\n+\t\treturn;\n+\t}\n+\n+\tstatus = ice_create_tunnel(&pf->hw, tnl_type, port);\n+\tif (status == ICE_ERR_OUT_OF_RANGE)\n+\t\tnetdev_info(netdev, \"Max tunneled UDP ports reached, port %d not added\\n\",\n+\t\t\t    port);\n+\telse if (status)\n+\t\tnetdev_err(netdev, \"Error adding UDP tunnel - %d\\n\",\n+\t\t\t   status);\n+}\n+\n+/**\n+ * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away\n+ * @netdev: This physical port's netdev\n+ * @ti: Tunnel endpoint information\n+ */\n+static void\n+ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)\n+{\n+\tstruct ice_netdev_priv *np = netdev_priv(netdev);\n+\tstruct ice_vsi *vsi = np->vsi;\n+\tstruct ice_pf *pf = vsi->back;\n+\tu16 port = ntohs(ti->port);\n+\tenum ice_status status;\n+\tbool retval;\n+\n+\tretval = 
ice_tunnel_port_in_use(&pf->hw, port, NULL);\n+\tif (!retval) {\n+\t\tnetdev_info(netdev, \"port %d not found in UDP tunnels list\\n\",\n+\t\t\t    port);\n+\t\treturn;\n+\t}\n+\n+\tstatus = ice_destroy_tunnel(&pf->hw, port, false);\n+\tif (status)\n+\t\tnetdev_err(netdev, \"error deleting port %d from UDP tunnels list\\n\",\n+\t\t\t   port);\n+}\n+\n /**\n  * ice_open - Called when a network interface becomes active\n  * @netdev: network interface device structure\n@@ -5184,6 +5262,10 @@ int ice_open(struct net_device *netdev)\n \tif (err)\n \t\tnetdev_err(netdev, \"Failed to open VSI 0x%04X on switch 0x%04X\\n\",\n \t\t\t   vsi->vsi_num, vsi->vsw->sw_id);\n+\n+\t/* Update existing tunnels information */\n+\tudp_tunnel_get_rx_info(netdev);\n+\n \treturn err;\n }\n \n@@ -5234,21 +5316,21 @@ ice_features_check(struct sk_buff *skb,\n \t\tfeatures &= ~NETIF_F_GSO_MASK;\n \n \tlen = skb_network_header(skb) - skb->data;\n-\tif (len & ~(ICE_TXD_MACLEN_MAX))\n+\tif (len > ICE_TXD_MACLEN_MAX || len & 0x1)\n \t\tgoto out_rm_features;\n \n \tlen = skb_transport_header(skb) - skb_network_header(skb);\n-\tif (len & ~(ICE_TXD_IPLEN_MAX))\n+\tif (len > ICE_TXD_IPLEN_MAX || len & 0x1)\n \t\tgoto out_rm_features;\n \n \tif (skb->encapsulation) {\n \t\tlen = skb_inner_network_header(skb) - skb_transport_header(skb);\n-\t\tif (len & ~(ICE_TXD_L4LEN_MAX))\n+\t\tif (len > ICE_TXD_L4LEN_MAX || len & 0x1)\n \t\t\tgoto out_rm_features;\n \n \t\tlen = skb_inner_transport_header(skb) -\n \t\t      skb_inner_network_header(skb);\n-\t\tif (len & ~(ICE_TXD_IPLEN_MAX))\n+\t\tif (len > ICE_TXD_IPLEN_MAX || len & 0x1)\n \t\t\tgoto out_rm_features;\n \t}\n \n@@ -5297,4 +5379,6 @@ static const struct net_device_ops ice_netdev_ops = {\n \t.ndo_bpf = ice_xdp,\n \t.ndo_xdp_xmit = ice_xdp_xmit,\n \t.ndo_xsk_wakeup = ice_xsk_wakeup,\n+\t.ndo_udp_tunnel_add = ice_udp_tunnel_add,\n+\t.ndo_udp_tunnel_del = ice_udp_tunnel_del,\n };\ndiff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h 
b/drivers/net/ethernet/intel/ice/ice_protocol_type.h\nindex 71647566964e..678db6bf7f57 100644\n--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h\n@@ -18,6 +18,7 @@ enum ice_prot_id {\n \tICE_PROT_IPV6_IL\t= 41,\n \tICE_PROT_TCP_IL\t\t= 49,\n \tICE_PROT_UDP_IL_OR_S\t= 53,\n+\tICE_PROT_GRE_OF\t\t= 64,\n \tICE_PROT_SCTP_IL\t= 96,\n \tICE_PROT_META_ID\t= 255, /* when offset == metadata */\n \tICE_PROT_INVALID\t= 255  /* when offset == ICE_FV_OFFSET_INVAL */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c\nindex f67e8362958c..cb64436507d3 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.c\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c\n@@ -1791,12 +1791,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)\n \tl2_len = ip.hdr - skb->data;\n \toffset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;\n \n-\tif (skb->encapsulation)\n-\t\treturn -1;\n+\tprotocol = vlan_get_protocol(skb);\n+\n+\tif (protocol == htons(ETH_P_IP))\n+\t\tfirst->tx_flags |= ICE_TX_FLAGS_IPV4;\n+\telse if (protocol == htons(ETH_P_IPV6))\n+\t\tfirst->tx_flags |= ICE_TX_FLAGS_IPV6;\n+\n+\tif (skb->encapsulation) {\n+\t\tbool gso_ena = false;\n+\t\tu32 tunnel = 0;\n+\n+\t\t/* define outer network header type */\n+\t\tif (first->tx_flags & ICE_TX_FLAGS_IPV4) {\n+\t\t\ttunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?\n+\t\t\t\t  ICE_TX_CTX_EIPT_IPV4 :\n+\t\t\t\t  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;\n+\t\t\tl4_proto = ip.v4->protocol;\n+\t\t} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {\n+\t\t\ttunnel |= ICE_TX_CTX_EIPT_IPV6;\n+\t\t\texthdr = ip.hdr + sizeof(*ip.v6);\n+\t\t\tl4_proto = ip.v6->nexthdr;\n+\t\t\tif (l4.hdr != exthdr)\n+\t\t\t\tipv6_skip_exthdr(skb, exthdr - skb->data,\n+\t\t\t\t\t\t &l4_proto, &frag_off);\n+\t\t}\n+\n+\t\t/* define outer transport */\n+\t\tswitch (l4_proto) {\n+\t\tcase IPPROTO_UDP:\n+\t\t\ttunnel |= 
ICE_TXD_CTX_UDP_TUNNELING;\n+\t\t\tfirst->tx_flags |= ICE_TX_FLAGS_TUNNEL;\n+\t\t\tbreak;\n+\t\tcase IPPROTO_GRE:\n+\t\t\ttunnel |= ICE_TXD_CTX_GRE_TUNNELING;\n+\t\t\tfirst->tx_flags |= ICE_TX_FLAGS_TUNNEL;\n+\t\t\tbreak;\n+\t\tcase IPPROTO_IPIP:\n+\t\tcase IPPROTO_IPV6:\n+\t\t\tfirst->tx_flags |= ICE_TX_FLAGS_TUNNEL;\n+\t\t\tl4.hdr = skb_inner_network_header(skb);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tif (first->tx_flags & ICE_TX_FLAGS_TSO)\n+\t\t\t\treturn -1;\n+\n+\t\t\tskb_checksum_help(skb);\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\t/* compute outer L3 header size */\n+\t\ttunnel |= ((l4.hdr - ip.hdr) / 4) <<\n+\t\t\t  ICE_TXD_CTX_QW0_EIPLEN_S;\n+\n+\t\t/* switch IP header pointer from outer to inner header */\n+\t\tip.hdr = skb_inner_network_header(skb);\n+\n+\t\t/* compute tunnel header size */\n+\t\ttunnel |= ((ip.hdr - l4.hdr) / 2) <<\n+\t\t\t   ICE_TXD_CTX_QW0_NATLEN_S;\n+\n+\t\tgso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;\n+\t\t/* indicate if we need to offload outer UDP header */\n+\t\tif ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&\n+\t\t    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))\n+\t\t\ttunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;\n+\n+\t\t/* record tunnel offload values */\n+\t\toff->cd_tunnel_params |= tunnel;\n+\n+\t\t/* set DTYP=1 to indicate that it's an Tx context descriptor\n+\t\t * in IPsec tunnel mode with Tx offloads in Quad word 1\n+\t\t */\n+\t\toff->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;\n+\n+\t\t/* switch L4 header pointer from outer to inner */\n+\t\tl4.hdr = skb_inner_transport_header(skb);\n+\t\tl4_proto = 0;\n+\n+\t\t/* reset type as we transition from outer to inner headers */\n+\t\tfirst->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);\n+\t\tif (ip.v4->version == 4)\n+\t\t\tfirst->tx_flags |= ICE_TX_FLAGS_IPV4;\n+\t\tif (ip.v6->version == 6)\n+\t\t\tfirst->tx_flags |= ICE_TX_FLAGS_IPV6;\n+\t}\n \n \t/* Enable IP checksum offloads */\n-\tprotocol = vlan_get_protocol(skb);\n-\tif (protocol == htons(ETH_P_IP)) 
{\n+\tif (first->tx_flags & ICE_TX_FLAGS_IPV4) {\n \t\tl4_proto = ip.v4->protocol;\n \t\t/* the stack computes the IP header already, the only time we\n \t\t * need the hardware to recompute it is in the case of TSO.\n@@ -1806,7 +1888,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)\n \t\telse\n \t\t\tcmd |= ICE_TX_DESC_CMD_IIPT_IPV4;\n \n-\t} else if (protocol == htons(ETH_P_IPV6)) {\n+\t} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {\n \t\tcmd |= ICE_TX_DESC_CMD_IIPT_IPV6;\n \t\texthdr = ip.hdr + sizeof(*ip.v6);\n \t\tl4_proto = ip.v6->nexthdr;\n@@ -1953,6 +2035,40 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)\n \t\tip.v6->payload_len = 0;\n \t}\n \n+\tif (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |\n+\t\t\t\t\t SKB_GSO_GRE_CSUM |\n+\t\t\t\t\t SKB_GSO_IPXIP4 |\n+\t\t\t\t\t SKB_GSO_IPXIP6 |\n+\t\t\t\t\t SKB_GSO_UDP_TUNNEL |\n+\t\t\t\t\t SKB_GSO_UDP_TUNNEL_CSUM)) {\n+\t\tif (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&\n+\t\t    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {\n+\t\t\tl4.udp->len = 0;\n+\n+\t\t\t/* determine offset of outer transport header */\n+\t\t\tl4_start = l4.hdr - skb->data;\n+\n+\t\t\t/* remove payload length from outer checksum */\n+\t\t\tpaylen = skb->len - l4_start;\n+\t\t\tcsum_replace_by_diff(&l4.udp->check,\n+\t\t\t\t\t     (__force __wsum)htonl(paylen));\n+\t\t}\n+\n+\t\t/* reset pointers to inner headers */\n+\n+\t\t/* cppcheck-suppress unreadVariable */\n+\t\tip.hdr = skb_inner_network_header(skb);\n+\t\tl4.hdr = skb_inner_transport_header(skb);\n+\n+\t\t/* initialize inner IP header fields */\n+\t\tif (ip.v4->version == 4) {\n+\t\t\tip.v4->tot_len = 0;\n+\t\t\tip.v4->check = 0;\n+\t\t} else {\n+\t\t\tip.v6->payload_len = 0;\n+\t\t}\n+\t}\n+\n \t/* determine offset of transport header */\n \tl4_start = l4.hdr - skb->data;\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex 7ee00a128663..025dd642cf28 
100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -113,6 +113,9 @@ static inline int ice_skb_pad(void)\n #define ICE_TX_FLAGS_TSO\tBIT(0)\n #define ICE_TX_FLAGS_HW_VLAN\tBIT(1)\n #define ICE_TX_FLAGS_SW_VLAN\tBIT(2)\n+#define ICE_TX_FLAGS_IPV4\tBIT(5)\n+#define ICE_TX_FLAGS_IPV6\tBIT(6)\n+#define ICE_TX_FLAGS_TUNNEL\tBIT(7)\n #define ICE_TX_FLAGS_VLAN_M\t0xffff0000\n #define ICE_TX_FLAGS_VLAN_PR_M\t0xe0000000\n #define ICE_TX_FLAGS_VLAN_PR_S\t29\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c\nindex 6da048a6ca7c..1f9c3d24cde7 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c\n@@ -84,12 +84,17 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,\n \t    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)\n {\n \tstruct ice_rx_ptype_decoded decoded;\n-\tu32 rx_error, rx_status;\n+\tu16 rx_error, rx_status;\n+\tu16 rx_stat_err1;\n \tbool ipv4, ipv6;\n \n \trx_status = le16_to_cpu(rx_desc->wb.status_error0);\n-\trx_error = rx_status;\n+\trx_error = rx_status & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |\n+\t\t\t\tBIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |\n+\t\t\t\tBIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |\n+\t\t\t\tBIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));\n \n+\trx_stat_err1 = le16_to_cpu(rx_desc->wb.status_error1);\n \tdecoded = ice_decode_rx_desc_ptype(ptype);\n \n \t/* Start with CHECKSUM_NONE and by default csum_level = 0 */\n@@ -125,6 +130,18 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,\n \tif (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))\n \t\tgoto checksum_fail;\n \n+\t/* check for outer UDP checksum error in tunneled packets */\n+\tif ((rx_stat_err1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&\n+\t    (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))\n+\t\tgoto checksum_fail;\n+\n+\t/* If there is an outer header present that might contain a 
checksum\n+\t * we need to bump the checksum level by 1 to reflect the fact that\n+\t * we are indicating we validated the inner checksum.\n+\t */\n+\tif (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)\n+\t\tskb->csum_level = 1;\n+\n \t/* Only report checksum unnecessary for TCP, UDP, or SCTP */\n \tswitch (decoded.inner_prot) {\n \tcase ICE_RX_PTYPE_INNER_PROT_TCP:\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex db0ef6ba907f..834781175a4d 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -559,6 +559,10 @@ struct ice_hw {\n \tu8 *pkg_copy;\n \tu32 pkg_size;\n \n+\t/* tunneling info */\n+\tstruct mutex tnl_lock;\n+\tstruct ice_tunnel_table tnl;\n+\n \t/* HW block tables */\n \tstruct ice_blk_info blk[ICE_BLK_COUNT];\n \tstruct mutex fl_profs_locks[ICE_BLK_COUNT];\t/* lock fltr profiles */\n",
    "prefixes": [
        "S40",
        "v2",
        "4/15"
    ]
}