Patch Detail
get: Show a patch.
patch: Partially update a patch.
put: Update a patch.
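As a minimal sketch of how this endpoint could be driven from a script (assuming the standard Patchwork REST API, the Python "requests" library, and a placeholder API token for the write path -- the "state" update below is illustrative, not taken from this page):

    # Fetch a patch and, with suitable credentials, update its state.
    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    PATCH_ID = 1288153

    # GET: show a patch (no authentication needed for public projects)
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: update a patch (requires a maintainer API token -- placeholder)
    headers = {"Authorization": "Token YOUR_API_TOKEN"}
    update = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                            json={"state": "accepted"},
                            headers=headers)
    update.raise_for_status()

The request and raw JSON response for the GET case follow.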
GET /api/patches/1288153/?format=api
{ "id": 1288153, "url": "http://patchwork.ozlabs.org/api/patches/1288153/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200512010146.41303-3-anthony.l.nguyen@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20200512010146.41303-3-anthony.l.nguyen@intel.com>", "list_archive_url": null, "date": "2020-05-12T01:01:42", "name": "[S42,3/7] ice: Support IPv4 Flow Director filters", "commit_ref": null, "pull_url": null, "state": "accepted", "archived": false, "hash": "b6746c4266a9f925cbd3ce66867c81cbcc9556f4", "submitter": { "id": 68875, "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api", "name": "Tony Nguyen", "email": "anthony.l.nguyen@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200512010146.41303-3-anthony.l.nguyen@intel.com/mbox/", "series": [ { "id": 176248, "url": "http://patchwork.ozlabs.org/api/series/176248/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=176248", "date": "2020-05-12T01:01:45", "name": "[S42,1/7] ice: Initialize Flow Director resources", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/176248/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1288153/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1288153/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=osuosl.org\n (client-ip=140.211.166.136; helo=silver.osuosl.org;\n envelope-from=intel-wired-lan-bounces@osuosl.org; receiver=<UNKNOWN>)", "ozlabs.org;\n dmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 49Lfjg5cQnz9sRf\n\tfor <incoming@patchwork.ozlabs.org>; Tue, 12 May 2020 11:05:11 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 04D0D25CF1;\n\tTue, 12 May 2020 01:05:10 +0000 (UTC)", "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id bjwiMR0mEnj4; Tue, 12 May 2020 01:04:41 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id 36F3729432;\n\tTue, 12 May 2020 01:04:35 +0000 (UTC)", "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n by ash.osuosl.org (Postfix) with ESMTP id B2DE41BF2C2\n for <intel-wired-lan@lists.osuosl.org>; Tue, 12 May 2020 01:04:33 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n by whitealder.osuosl.org 
(Postfix) with ESMTP id A53C186DFC\n for <intel-wired-lan@lists.osuosl.org>; Tue, 12 May 2020 01:04:33 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id ycXHNoNpnd5p for <intel-wired-lan@lists.osuosl.org>;\n Tue, 12 May 2020 01:04:20 +0000 (UTC)", "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n by whitealder.osuosl.org (Postfix) with ESMTPS id 6B15E8680C\n for <intel-wired-lan@lists.osuosl.org>; Tue, 12 May 2020 01:04:20 +0000 (UTC)", "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 11 May 2020 18:04:19 -0700", "from unknown (HELO localhost.jf.intel.com) ([10.166.241.65])\n by orsmga004.jf.intel.com with ESMTP; 11 May 2020 18:04:19 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "IronPort-SDR": [ "\n JcTD443WL9mgDZKXL6KdCJ2DelukK2QnPs6LnI+SLb4XHSK2jOz1PaS/2rE4aad1+w/1d9MoY1\n Lf6W3wRO2XWA==", "\n kodgiYJSVBSiOftSNqL3Nfye2Q27Yv6fiNgjkYNNLAFUD1z4CJEqdjChWzuLMGE8D2yfpQ/3nO\n xcY2inrbOnvQ==" ], "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.73,381,1583222400\"; d=\"scan'208\";a=\"409116466\"", "From": "Tony Nguyen <anthony.l.nguyen@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Mon, 11 May 2020 18:01:42 -0700", "Message-Id": "<20200512010146.41303-3-anthony.l.nguyen@intel.com>", "X-Mailer": "git-send-email 2.20.1", "In-Reply-To": "<20200512010146.41303-1-anthony.l.nguyen@intel.com>", "References": "<20200512010146.41303-1-anthony.l.nguyen@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH S42 3/7] ice: Support IPv4 Flow Director\n filters", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n <intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Henry Tieman <henry.w.tieman@intel.com>\n\nSupport the addition and deletion of IPv4 filters.\n\nSupported fields are: src-ip, dst-ip, src-port, and dst-port\nSupported flow-types are: tcp4, udp4, sctp4, ip4\n\nExample usage:\n\nethtool -N eth0 flow-type tcp4 src-ip 192.168.0.55 dst-ip 172.16.0.55 \\\nsrc-port 16 dst-port 12 action 32\n\nSigned-off-by: Henry Tieman <henry.w.tieman@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice.h | 4 +\n drivers/net/ethernet/intel/ice/ice_ethtool.c | 4 +\n .../net/ethernet/intel/ice/ice_ethtool_fdir.c | 658 ++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_fdir.c | 513 ++++++++++++++\n drivers/net/ethernet/intel/ice/ice_fdir.h | 79 +++\n 
.../net/ethernet/intel/ice/ice_flex_pipe.c | 34 +\n .../net/ethernet/intel/ice/ice_flex_pipe.h | 3 +\n .../net/ethernet/intel/ice/ice_hw_autogen.h | 6 +\n .../net/ethernet/intel/ice/ice_lan_tx_rx.h | 101 +++\n drivers/net/ethernet/intel/ice/ice_txrx.c | 82 +++\n drivers/net/ethernet/intel/ice/ice_txrx.h | 3 +\n drivers/net/ethernet/intel/ice/ice_type.h | 6 +\n 12 files changed, 1493 insertions(+)", "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex 51caf3a3c76b..97f95822fee1 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -99,6 +99,7 @@ extern const char ice_drv_ver[];\n #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))\n #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))\n #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))\n+#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))\n \n /* Macro for each VSI in a PF */\n #define ice_for_each_vsi(pf, i) \\\n@@ -219,6 +220,7 @@ enum ice_state {\n \t__ICE_CFG_BUSY,\n \t__ICE_SERVICE_SCHED,\n \t__ICE_SERVICE_DIS,\n+\t__ICE_FD_FLUSH_REQ,\n \t__ICE_OICR_INTR_DIS,\t\t/* Global OICR interrupt disabled */\n \t__ICE_MDD_VF_PRINT_PENDING,\t/* set when MDD event handle */\n \t__ICE_VF_RESETS_DISABLED,\t/* disable resets during ice_remove */\n@@ -571,6 +573,8 @@ ice_for_each_peer(struct ice_pf *pf, void *data,\n const char *ice_stat_str(enum ice_status stat_err);\n const char *ice_aq_str(enum ice_aq_err aq_err);\n void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);\n+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);\n+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);\n int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);\n int\n ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,\ndiff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c\nindex f77db28e1e4c..72105d70cead 100644\n--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c\n+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c\n@@ -2537,6 +2537,10 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)\n \tstruct ice_vsi *vsi = np->vsi;\n \n \tswitch (cmd->cmd) {\n+\tcase ETHTOOL_SRXCLSRLINS:\n+\t\treturn ice_add_fdir_ethtool(vsi, cmd);\n+\tcase ETHTOOL_SRXCLSRLDEL:\n+\t\treturn ice_del_fdir_ethtool(vsi, cmd);\n \tcase ETHTOOL_SRXFH:\n \t\treturn ice_set_rss_hash_opt(vsi, cmd);\n \tdefault:\ndiff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c\nindex a7349cdb5ed1..b584b7a03f10 100644\n--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c\n+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c\n@@ -265,6 +265,43 @@ void ice_fdir_release_flows(struct ice_hw *hw)\n \t\tice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);\n }\n \n+/**\n+ * ice_fdir_num_avail_fltr - return the number of unused flow director filters\n+ * @hw: pointer to hardware structure\n+ * @vsi: software VSI structure\n+ *\n+ * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can\n+ * use filters from either pool. The guaranteed pool is divided between VSIs.\n+ * The best effort filter pool is common to all VSIs and is a device shared\n+ * resource pool. 
The number of filters available to this VSI is the sum of\n+ * the VSIs guaranteed filter pool and the global available best effort\n+ * filter pool.\n+ *\n+ * Returns the number of available flow director filters to this VSI\n+ */\n+static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)\n+{\n+\tu16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);\n+\tu16 num_guar;\n+\tu16 num_be;\n+\n+\t/* total guaranteed filters assigned to this VSI */\n+\tnum_guar = vsi->num_gfltr;\n+\n+\t/* minus the guaranteed filters programed by this VSI */\n+\tnum_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &\n+\t\t VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;\n+\n+\t/* total global best effort filters */\n+\tnum_be = hw->func_caps.fd_fltr_best_effort;\n+\n+\t/* minus the global best effort filters programmed */\n+\tnum_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>\n+\t\t GLQF_FD_CNT_FD_BCNT_S;\n+\n+\treturn num_guar + num_be;\n+}\n+\n /**\n * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)\n * @hw: HW structure containing the FDir flow profile structure(s)\n@@ -344,6 +381,14 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,\n \t\tif (!memcmp(old_seg, seg, sizeof(*seg)))\n \t\t\treturn -EEXIST;\n \n+\t\t/* if there are FDir filters using this flow,\n+\t\t * then return error.\n+\t\t */\n+\t\tif (hw->fdir_fltr_cnt[flow]) {\n+\t\t\tdev_err(dev, \"Failed to add filter. Flow director filters on each port must have the same input set.\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n \t\t/* remove HW filter definition */\n \t\tice_fdir_rem_flow(hw, ICE_BLK_FD, flow);\n \t}\n@@ -508,6 +553,347 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)\n \treturn -EOPNOTSUPP;\n }\n \n+/**\n+ * ice_set_fdir_ip4_seg\n+ * @seg: flow segment for programming\n+ * @tcp_ip4_spec: mask data from ethtool\n+ * @l4_proto: Layer 4 protocol to program\n+ * @perfect_fltr: only valid on success; returns true if perfect filter,\n+ *\t\t false if not\n+ *\n+ * Set the mask data into the flow segment to be used to program HW\n+ * table based on provided L4 protocol for IPv4\n+ */\n+static int\n+ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,\n+\t\t struct ethtool_tcpip4_spec *tcp_ip4_spec,\n+\t\t enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)\n+{\n+\tenum ice_flow_field src_port, dst_port;\n+\n+\t/* make sure we don't have any empty rule */\n+\tif (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&\n+\t !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)\n+\t\treturn -EINVAL;\n+\n+\t/* filtering on TOS not supported */\n+\tif (tcp_ip4_spec->tos)\n+\t\treturn -EOPNOTSUPP;\n+\n+\tif (l4_proto == ICE_FLOW_SEG_HDR_TCP) {\n+\t\tsrc_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;\n+\t\tdst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;\n+\t} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {\n+\t\tsrc_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;\n+\t\tdst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;\n+\t} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {\n+\t\tsrc_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;\n+\t\tdst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;\n+\t} else {\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t*perfect_fltr = true;\n+\tICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);\n+\n+\t/* IP source address */\n+\tif (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))\n+\t\tice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, false);\n+\telse if (!tcp_ip4_spec->ip4src)\n+\t\t*perfect_fltr = 
false;\n+\telse\n+\t\treturn -EOPNOTSUPP;\n+\n+\t/* IP destination address */\n+\tif (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))\n+\t\tice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, false);\n+\telse if (!tcp_ip4_spec->ip4dst)\n+\t\t*perfect_fltr = false;\n+\telse\n+\t\treturn -EOPNOTSUPP;\n+\n+\t/* Layer 4 source port */\n+\tif (tcp_ip4_spec->psrc == htons(0xFFFF))\n+\t\tice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t false);\n+\telse if (!tcp_ip4_spec->psrc)\n+\t\t*perfect_fltr = false;\n+\telse\n+\t\treturn -EOPNOTSUPP;\n+\n+\t/* Layer 4 destination port */\n+\tif (tcp_ip4_spec->pdst == htons(0xFFFF))\n+\t\tice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t false);\n+\telse if (!tcp_ip4_spec->pdst)\n+\t\t*perfect_fltr = false;\n+\telse\n+\t\treturn -EOPNOTSUPP;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_set_fdir_ip4_usr_seg\n+ * @seg: flow segment for programming\n+ * @usr_ip4_spec: ethtool userdef packet offset\n+ * @perfect_fltr: only valid on success; returns true if perfect filter,\n+ *\t\t false if not\n+ *\n+ * Set the offset data into the flow segment to be used to program HW\n+ * table for IPv4\n+ */\n+static int\n+ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,\n+\t\t\t struct ethtool_usrip4_spec *usr_ip4_spec,\n+\t\t\t bool *perfect_fltr)\n+{\n+\t/* first 4 bytes of Layer 4 header */\n+\tif (usr_ip4_spec->l4_4_bytes)\n+\t\treturn -EINVAL;\n+\tif (usr_ip4_spec->tos)\n+\t\treturn -EINVAL;\n+\tif (usr_ip4_spec->ip_ver)\n+\t\treturn -EINVAL;\n+\t/* Filtering on Layer 4 protocol not supported */\n+\tif (usr_ip4_spec->proto)\n+\t\treturn -EOPNOTSUPP;\n+\t/* empty rules are not valid */\n+\tif (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)\n+\t\treturn -EINVAL;\n+\n+\t*perfect_fltr = true;\n+\tICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);\n+\n+\t/* IP source address */\n+\tif (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))\n+\t\tice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, false);\n+\telse if (!usr_ip4_spec->ip4src)\n+\t\t*perfect_fltr = false;\n+\telse\n+\t\treturn -EOPNOTSUPP;\n+\n+\t/* IP destination address */\n+\tif (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))\n+\t\tice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,\n+\t\t\t\t ICE_FLOW_FLD_OFF_INVAL, false);\n+\telse if (!usr_ip4_spec->ip4dst)\n+\t\t*perfect_fltr = false;\n+\telse\n+\t\treturn -EOPNOTSUPP;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter\n+ * @pf: PF structure\n+ * @fsp: pointer to ethtool Rx flow specification\n+ *\n+ * Returns 0 on success.\n+ */\n+static int\n+ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp)\n+{\n+\tstruct ice_flow_seg_info *seg, *tun_seg;\n+\tstruct device *dev = ice_pf_to_dev(pf);\n+\tenum ice_fltr_ptype fltr_idx;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tbool perfect_filter;\n+\tint ret;\n+\n+\tseg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);\n+\tif (!seg)\n+\t\treturn -ENOMEM;\n+\n+\ttun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,\n+\t\t\t GFP_KERNEL);\n+\tif (!tun_seg) {\n+\t\tdevm_kfree(dev, seg);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tswitch (fsp->flow_type & ~FLOW_EXT) {\n+\tcase 
TCP_V4_FLOW:\n+\t\tret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,\n+\t\t\t\t\t ICE_FLOW_SEG_HDR_TCP,\n+\t\t\t\t\t &perfect_filter);\n+\t\tbreak;\n+\tcase UDP_V4_FLOW:\n+\t\tret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,\n+\t\t\t\t\t ICE_FLOW_SEG_HDR_UDP,\n+\t\t\t\t\t &perfect_filter);\n+\t\tbreak;\n+\tcase SCTP_V4_FLOW:\n+\t\tret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,\n+\t\t\t\t\t ICE_FLOW_SEG_HDR_SCTP,\n+\t\t\t\t\t &perfect_filter);\n+\t\tbreak;\n+\tcase IPV4_USER_FLOW:\n+\t\tret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,\n+\t\t\t\t\t &perfect_filter);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EINVAL;\n+\t}\n+\tif (ret)\n+\t\tgoto err_exit;\n+\n+\t/* tunnel segments are shifted up one. */\n+\tmemcpy(&tun_seg[1], seg, sizeof(*seg));\n+\n+\t/* add filter for outer headers */\n+\tfltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);\n+\tret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,\n+\t\t\t\t\tICE_FD_HW_SEG_NON_TUN);\n+\tif (ret == -EEXIST)\n+\t\t/* Rule already exists, free memory and continue */\n+\t\tdevm_kfree(dev, seg);\n+\telse if (ret)\n+\t\t/* could not write filter, free memory */\n+\t\tgoto err_exit;\n+\n+\t/* make tunneled filter HW entries if possible */\n+\tmemcpy(&tun_seg[1], seg, sizeof(*seg));\n+\tret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,\n+\t\t\t\t\tICE_FD_HW_SEG_TUN);\n+\tif (ret == -EEXIST) {\n+\t\t/* Rule already exists, free memory and count as success */\n+\t\tdevm_kfree(dev, tun_seg);\n+\t\tret = 0;\n+\t} else if (ret) {\n+\t\t/* could not write tunnel filter, but outer filter exists */\n+\t\tdevm_kfree(dev, tun_seg);\n+\t}\n+\n+\tif (perfect_filter)\n+\t\tset_bit(fltr_idx, hw->fdir_perfect_fltr);\n+\telse\n+\t\tclear_bit(fltr_idx, hw->fdir_perfect_fltr);\n+\n+\treturn ret;\n+\n+err_exit:\n+\tdevm_kfree(dev, tun_seg);\n+\tdevm_kfree(dev, seg);\n+\n+\treturn -EOPNOTSUPP;\n+}\n+\n+/**\n+ * ice_fdir_write_fltr - send a flow director filter to the hardware\n+ * @pf: PF data structure\n+ * @input: filter structure\n+ * @add: true adds filter and false removed filter\n+ * @is_tun: true adds inner filter on tunnel and false outer headers\n+ *\n+ * returns 0 on success and negative value on error\n+ */\n+static int\n+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,\n+\t\t bool is_tun)\n+{\n+\tstruct device *dev = ice_pf_to_dev(pf);\n+\tstruct ice_hw *hw = &pf->hw;\n+\tstruct ice_fltr_desc desc;\n+\tstruct ice_vsi *ctrl_vsi;\n+\tenum ice_status status;\n+\tu8 *pkt, *frag_pkt;\n+\tbool has_frag;\n+\tint err;\n+\n+\tctrl_vsi = ice_get_ctrl_vsi(pf);\n+\tif (!ctrl_vsi)\n+\t\treturn -EINVAL;\n+\n+\tpkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);\n+\tif (!pkt)\n+\t\treturn -ENOMEM;\n+\tfrag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);\n+\tif (!frag_pkt) {\n+\t\terr = -ENOMEM;\n+\t\tgoto err_free;\n+\t}\n+\n+\tice_fdir_get_prgm_desc(hw, input, &desc, add);\n+\tstatus = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);\n+\tif (status) {\n+\t\terr = ice_status_to_errno(status);\n+\t\tgoto err_free_all;\n+\t}\n+\terr = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);\n+\tif (err)\n+\t\tgoto err_free_all;\n+\n+\t/* repeat for fragment packet */\n+\thas_frag = ice_fdir_has_frag(input->flow_type);\n+\tif (has_frag) {\n+\t\t/* does not return error */\n+\t\tice_fdir_get_prgm_desc(hw, input, &desc, add);\n+\t\tstatus = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,\n+\t\t\t\t\t\t is_tun);\n+\t\tif (status) {\n+\t\t\terr = 
ice_status_to_errno(status);\n+\t\t\tgoto err_frag;\n+\t\t}\n+\t\terr = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);\n+\t\tif (err)\n+\t\t\tgoto err_frag;\n+\t} else {\n+\t\tdevm_kfree(dev, frag_pkt);\n+\t}\n+\n+\treturn 0;\n+\n+err_free_all:\n+\tdevm_kfree(dev, frag_pkt);\n+err_free:\n+\tdevm_kfree(dev, pkt);\n+\treturn err;\n+\n+err_frag:\n+\tdevm_kfree(dev, frag_pkt);\n+\treturn err;\n+}\n+\n+/**\n+ * ice_fdir_write_all_fltr - send a flow director filter to the hardware\n+ * @pf: PF data structure\n+ * @input: filter structure\n+ * @add: true adds filter and false removed filter\n+ *\n+ * returns 0 on success and negative value on error\n+ */\n+static int\n+ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,\n+\t\t\tbool add)\n+{\n+\tu16 port_num;\n+\tint tun;\n+\n+\tfor (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {\n+\t\tbool is_tun = tun == ICE_FD_HW_SEG_TUN;\n+\t\tint err;\n+\n+\t\tif (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,\n+\t\t\t\t\t\t\t&port_num))\n+\t\t\tcontinue;\n+\t\terr = ice_fdir_write_fltr(pf, input, add, is_tun);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n * ice_fdir_create_dflt_rules - create default perfect filters\n * @pf: PF data structure\n@@ -535,6 +921,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf)\n */\n void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)\n {\n+\tstruct ice_fdir_fltr *f_rule, *tmp;\n \tstruct ice_pf *pf = vsi->back;\n \tstruct ice_hw *hw = &pf->hw;\n \tenum ice_fltr_ptype flow;\n@@ -548,6 +935,13 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)\n \tmutex_lock(&hw->fdir_fltr_lock);\n \tif (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))\n \t\tgoto release_lock;\n+\tlist_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {\n+\t\t/* ignore return value */\n+\t\tice_fdir_write_all_fltr(pf, f_rule, false);\n+\t\tice_fdir_update_cntrs(hw, f_rule->flow_type, false);\n+\t\tlist_del(&f_rule->fltr_node);\n+\t\tdevm_kfree(ice_hw_to_dev(hw), f_rule);\n+\t}\n \n \tif (hw->fdir_prof)\n \t\tfor (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;\n@@ -558,3 +952,267 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)\n release_lock:\n \tmutex_unlock(&hw->fdir_fltr_lock);\n }\n+\n+/**\n+ * ice_fdir_update_list_entry - add or delete a filter from the filter list\n+ * @pf: PF structure\n+ * @input: filter structure\n+ * @fltr_idx: ethtool index of filter to modify\n+ *\n+ * returns 0 on success and negative on errors\n+ */\n+static int\n+ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,\n+\t\t\t int fltr_idx)\n+{\n+\tstruct ice_fdir_fltr *old_fltr;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint err = -ENOENT;\n+\n+\t/* Do not update filters during reset */\n+\tif (ice_is_reset_in_progress(pf->state))\n+\t\treturn -EBUSY;\n+\n+\told_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);\n+\tif (old_fltr) {\n+\t\terr = ice_fdir_write_all_fltr(pf, old_fltr, false);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t\tice_fdir_update_cntrs(hw, old_fltr->flow_type, false);\n+\t\tif (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])\n+\t\t\t/* we just deleted the last filter of flow_type so we\n+\t\t\t * should also delete the HW filter info.\n+\t\t\t */\n+\t\t\tice_fdir_rem_flow(hw, ICE_BLK_FD, old_fltr->flow_type);\n+\t\tlist_del(&old_fltr->fltr_node);\n+\t\tdevm_kfree(ice_hw_to_dev(hw), old_fltr);\n+\t}\n+\tif (!input)\n+\t\treturn err;\n+\tice_fdir_list_add_fltr(hw, input);\n+\tice_fdir_update_cntrs(hw, input->flow_type, true);\n+\treturn 
0;\n+}\n+\n+/**\n+ * ice_del_fdir_ethtool - delete Flow Director filter\n+ * @vsi: pointer to target VSI\n+ * @cmd: command to add or delete Flow Director filter\n+ *\n+ * Returns 0 on success and negative values for failure\n+ */\n+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)\n+{\n+\tstruct ethtool_rx_flow_spec *fsp =\n+\t\t(struct ethtool_rx_flow_spec *)&cmd->fs;\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint val;\n+\n+\tif (!test_bit(ICE_FLAG_FD_ENA, pf->flags))\n+\t\treturn -EOPNOTSUPP;\n+\n+\t/* Do not delete filters during reset */\n+\tif (ice_is_reset_in_progress(pf->state)) {\n+\t\tdev_err(ice_pf_to_dev(pf), \"Device is resetting - deleting Flow Director filters not supported during reset\\n\");\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tif (test_bit(__ICE_FD_FLUSH_REQ, pf->state))\n+\t\treturn -EBUSY;\n+\n+\tmutex_lock(&hw->fdir_fltr_lock);\n+\tval = ice_fdir_update_list_entry(pf, NULL, fsp->location);\n+\tmutex_unlock(&hw->fdir_fltr_lock);\n+\n+\treturn val;\n+}\n+\n+/**\n+ * ice_set_fdir_input_set - Set the input set for Flow Director\n+ * @vsi: pointer to target VSI\n+ * @fsp: pointer to ethtool Rx flow specification\n+ * @input: filter structure\n+ */\n+static int\n+ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,\n+\t\t struct ice_fdir_fltr *input)\n+{\n+\tu16 dest_vsi, q_index = 0;\n+\tstruct ice_pf *pf;\n+\tstruct ice_hw *hw;\n+\tint flow_type;\n+\tu8 dest_ctl;\n+\n+\tif (!vsi || !fsp || !input)\n+\t\treturn -EINVAL;\n+\n+\tpf = vsi->back;\n+\thw = &pf->hw;\n+\n+\tdest_vsi = vsi->idx;\n+\tif (fsp->ring_cookie == RX_CLS_FLOW_DISC) {\n+\t\tdest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;\n+\t} else {\n+\t\tu32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);\n+\t\tu8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);\n+\n+\t\tif (vf) {\n+\t\t\tdev_err(ice_pf_to_dev(pf), \"Failed to add filter. 
Flow director filters are not supported on VF queues.\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tif (ring >= vsi->num_rxq)\n+\t\t\treturn -EINVAL;\n+\n+\t\tdest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;\n+\t\tq_index = ring;\n+\t}\n+\n+\tinput->fltr_id = fsp->location;\n+\tinput->q_index = q_index;\n+\tflow_type = fsp->flow_type & ~FLOW_EXT;\n+\n+\tinput->dest_vsi = dest_vsi;\n+\tinput->dest_ctl = dest_ctl;\n+\tinput->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;\n+\tinput->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);\n+\tinput->flow_type = ice_ethtool_flow_to_fltr(flow_type);\n+\n+\tif (fsp->flow_type & FLOW_EXT) {\n+\t\tmemcpy(input->ext_data.usr_def, fsp->h_ext.data,\n+\t\t sizeof(input->ext_data.usr_def));\n+\t\tinput->ext_data.vlan_type = fsp->h_ext.vlan_etype;\n+\t\tinput->ext_data.vlan_tag = fsp->h_ext.vlan_tci;\n+\t\tmemcpy(input->ext_mask.usr_def, fsp->m_ext.data,\n+\t\t sizeof(input->ext_mask.usr_def));\n+\t\tinput->ext_mask.vlan_type = fsp->m_ext.vlan_etype;\n+\t\tinput->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;\n+\t}\n+\n+\tswitch (flow_type) {\n+\tcase TCP_V4_FLOW:\n+\tcase UDP_V4_FLOW:\n+\tcase SCTP_V4_FLOW:\n+\t\tinput->ip.dst_port = fsp->h_u.tcp_ip4_spec.pdst;\n+\t\tinput->ip.src_port = fsp->h_u.tcp_ip4_spec.psrc;\n+\t\tinput->ip.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;\n+\t\tinput->ip.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;\n+\t\tinput->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;\n+\t\tinput->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;\n+\t\tinput->mask.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;\n+\t\tinput->mask.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;\n+\t\tbreak;\n+\tcase IPV4_USER_FLOW:\n+\t\tinput->ip.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;\n+\t\tinput->ip.src_ip = fsp->h_u.usr_ip4_spec.ip4src;\n+\t\tinput->ip.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;\n+\t\tinput->ip.proto = fsp->h_u.usr_ip4_spec.proto;\n+\t\tinput->ip.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;\n+\t\tinput->ip.tos = fsp->h_u.usr_ip4_spec.tos;\n+\t\tinput->mask.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;\n+\t\tinput->mask.src_ip = fsp->m_u.usr_ip4_spec.ip4src;\n+\t\tinput->mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;\n+\t\tinput->mask.proto = fsp->m_u.usr_ip4_spec.proto;\n+\t\tinput->mask.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;\n+\t\tinput->mask.tos = fsp->m_u.usr_ip4_spec.tos;\n+\t\tbreak;\n+\tdefault:\n+\t\t/* not doing un-parsed flow types */\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_add_fdir_ethtool - Add/Remove Flow Director filter\n+ * @vsi: pointer to target VSI\n+ * @cmd: command to add or delete Flow Director filter\n+ *\n+ * Returns 0 on success and negative values for failure\n+ */\n+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)\n+{\n+\tstruct ethtool_rx_flow_spec *fsp;\n+\tstruct ice_fdir_fltr *input;\n+\tstruct device *dev;\n+\tstruct ice_pf *pf;\n+\tstruct ice_hw *hw;\n+\tint fltrs_needed;\n+\tu16 tunnel_port;\n+\tint ret;\n+\n+\tif (!vsi)\n+\t\treturn -EINVAL;\n+\n+\tpf = vsi->back;\n+\thw = &pf->hw;\n+\tdev = ice_pf_to_dev(pf);\n+\n+\tif (!test_bit(ICE_FLAG_FD_ENA, pf->flags))\n+\t\treturn -EOPNOTSUPP;\n+\n+\t/* Do not program filters during reset */\n+\tif (ice_is_reset_in_progress(pf->state)) {\n+\t\tdev_err(dev, \"Device is resetting - adding Flow Director filters not supported during reset\\n\");\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tfsp = (struct ethtool_rx_flow_spec *)&cmd->fs;\n+\n+\tif (fsp->flow_type & FLOW_MAC_EXT)\n+\t\treturn -EINVAL;\n+\n+\tret = ice_cfg_fdir_xtrct_seq(pf, fsp);\n+\tif (ret)\n+\t\treturn 
ret;\n+\n+\tif (fsp->location >= ice_get_fdir_cnt_all(hw)) {\n+\t\tdev_err(dev, \"Failed to add filter. The maximum number of flow director filters has been reached.\\n\");\n+\t\treturn -ENOSPC;\n+\t}\n+\n+\t/* return error if not an update and no available filters */\n+\tfltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?\n+\t\t2 : 1;\n+\tif (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&\n+\t ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {\n+\t\tdev_err(dev, \"Failed to add filter. The maximum number of flow director filters has been reached.\\n\");\n+\t\treturn -ENOSPC;\n+\t}\n+\n+\tinput = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);\n+\tif (!input)\n+\t\treturn -ENOMEM;\n+\n+\tret = ice_set_fdir_input_set(vsi, fsp, input);\n+\tif (ret)\n+\t\tgoto free_input;\n+\n+\tmutex_lock(&hw->fdir_fltr_lock);\n+\tif (ice_fdir_is_dup_fltr(hw, input)) {\n+\t\tret = -EINVAL;\n+\t\tgoto release_lock;\n+\t}\n+\n+\t/* input struct is added to the HW filter list */\n+\tice_fdir_update_list_entry(pf, input, fsp->location);\n+\n+\tret = ice_fdir_write_all_fltr(pf, input, true);\n+\tif (ret)\n+\t\tgoto remove_sw_rule;\n+\n+\tgoto release_lock;\n+\n+remove_sw_rule:\n+\tice_fdir_update_cntrs(hw, input->flow_type, false);\n+\tlist_del(&input->fltr_node);\n+release_lock:\n+\tmutex_unlock(&hw->fdir_fltr_lock);\n+free_input:\n+\tif (ret)\n+\t\tdevm_kfree(dev, input);\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c\nindex 68405882f63f..505017c9f1e8 100644\n--- a/drivers/net/ethernet/intel/ice/ice_fdir.c\n+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c\n@@ -3,6 +3,261 @@\n \n #include \"ice_common.h\"\n \n+/* These are training packet headers used to program flow director filters. 
*/\n+static const u8 ice_fdir_tcpv4_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00,\n+\t0x20, 0x00, 0x00, 0x00, 0x00, 0x00\n+};\n+\n+static const u8 ice_fdir_udpv4_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00,\n+};\n+\n+static const u8 ice_fdir_sctpv4_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+};\n+\n+static const u8 ice_fdir_ipv4_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00\n+};\n+\n+static const u8 ice_fdir_tcp4_tun_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,\n+\t0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00,\n+\t0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,\n+};\n+\n+static const u8 ice_fdir_udp4_tun_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,\n+\t0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00,\n+\t0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00,\n+};\n+\n+static const u8 ice_fdir_sctp4_tun_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,\n+\t0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,\n+\t0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00,\n+\t0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+};\n+\n+static const u8 ice_fdir_ip4_tun_pkt[] = {\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x08, 
0x00, 0x45, 0x00,\n+\t0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,\n+\t0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,\n+\t0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n+\t0x00, 0x00, 0x00, 0x00,\n+};\n+\n+/* Flow Director no-op training packet table */\n+static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {\n+\t{\n+\t\tICE_FLTR_PTYPE_NONF_IPV4_TCP,\n+\t\tsizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,\n+\t\tsizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,\n+\t},\n+\t{\n+\t\tICE_FLTR_PTYPE_NONF_IPV4_UDP,\n+\t\tsizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt,\n+\t\tsizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt,\n+\t},\n+\t{\n+\t\tICE_FLTR_PTYPE_NONF_IPV4_SCTP,\n+\t\tsizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt,\n+\t\tsizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt,\n+\t},\n+\t{\n+\t\tICE_FLTR_PTYPE_NONF_IPV4_OTHER,\n+\t\tsizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt,\n+\t\tsizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,\n+\t},\n+};\n+\n+#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt)\n+\n+/**\n+ * ice_set_dflt_val_fd_desc\n+ * @fd_fltr_ctx: pointer to fd filter descriptor\n+ */\n+static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx)\n+{\n+\tfd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;\n+\tfd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;\n+\tfd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST;\n+\tfd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;\n+\tfd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE;\n+\tfd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX;\n+\tfd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1;\n+\tfd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT;\n+\tfd_fltr_ctx->drop = ICE_FXD_FLTR_QW0_DROP_NO;\n+\tfd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE;\n+\tfd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0;\n+\tfd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0;\n+\tfd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG;\n+\tfd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO;\n+\tfd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO;\n+\tfd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET;\n+\tfd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;\n+\tfd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD;\n+\tfd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO;\n+}\n+\n+/**\n+ * ice_set_fd_desc_val\n+ * @ctx: pointer to fd filter descriptor context\n+ * @fdir_desc: populated with fd filter descriptor values\n+ */\n+static void\n+ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,\n+\t\t struct ice_fltr_desc *fdir_desc)\n+{\n+\tu64 qword;\n+\n+\t/* prep QW0 of FD filter programming desc */\n+\tqword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &\n+\t\tICE_FXD_FLTR_QW0_QINDEX_M;\n+\tqword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &\n+\t\t ICE_FXD_FLTR_QW0_COMP_Q_M;\n+\tqword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &\n+\t\t ICE_FXD_FLTR_QW0_COMP_REPORT_M;\n+\tqword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &\n+\t\t ICE_FXD_FLTR_QW0_FD_SPACE_M;\n+\tqword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &\n+\t\t ICE_FXD_FLTR_QW0_STAT_CNT_M;\n+\tqword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &\n+\t\t ICE_FXD_FLTR_QW0_STAT_ENA_M;\n+\tqword |= 
((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &\n+\t\t ICE_FXD_FLTR_QW0_EVICT_ENA_M;\n+\tqword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &\n+\t\t ICE_FXD_FLTR_QW0_TO_Q_M;\n+\tqword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &\n+\t\t ICE_FXD_FLTR_QW0_TO_Q_PRI_M;\n+\tqword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &\n+\t\t ICE_FXD_FLTR_QW0_DPU_RECIPE_M;\n+\tqword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &\n+\t\t ICE_FXD_FLTR_QW0_DROP_M;\n+\tqword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &\n+\t\t ICE_FXD_FLTR_QW0_FLEX_PRI_M;\n+\tqword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &\n+\t\t ICE_FXD_FLTR_QW0_FLEX_MDID_M;\n+\tqword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &\n+\t\t ICE_FXD_FLTR_QW0_FLEX_VAL_M;\n+\tfdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);\n+\n+\t/* prep QW1 of FD filter programming desc */\n+\tqword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &\n+\t\tICE_FXD_FLTR_QW1_DTYPE_M;\n+\tqword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &\n+\t\t ICE_FXD_FLTR_QW1_PCMD_M;\n+\tqword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &\n+\t\t ICE_FXD_FLTR_QW1_PROF_PRI_M;\n+\tqword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &\n+\t\t ICE_FXD_FLTR_QW1_PROF_M;\n+\tqword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &\n+\t\t ICE_FXD_FLTR_QW1_FD_VSI_M;\n+\tqword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &\n+\t\t ICE_FXD_FLTR_QW1_SWAP_M;\n+\tqword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &\n+\t\t ICE_FXD_FLTR_QW1_FDID_PRI_M;\n+\tqword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &\n+\t\t ICE_FXD_FLTR_QW1_FDID_MDID_M;\n+\tqword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &\n+\t\t ICE_FXD_FLTR_QW1_FDID_M;\n+\tfdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);\n+}\n+\n+/**\n+ * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct\n+ * @hw: pointer to the hardware structure\n+ * @input: filter\n+ * @fdesc: filter descriptor\n+ * @add: if add is true, this is an add operation, false implies delete\n+ */\n+void\n+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,\n+\t\t struct ice_fltr_desc *fdesc, bool add)\n+{\n+\tstruct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 };\n+\n+\t/* set default context info */\n+\tice_set_dflt_val_fd_desc(&fdir_fltr_ctx);\n+\n+\t/* change sideband filtering values */\n+\tfdir_fltr_ctx.fdid = input->fltr_id;\n+\tif (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {\n+\t\tfdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;\n+\t\tfdir_fltr_ctx.qindex = 0;\n+\t} else {\n+\t\tfdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;\n+\t\tfdir_fltr_ctx.qindex = input->q_index;\n+\t}\n+\tfdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;\n+\tfdir_fltr_ctx.cnt_index = input->cnt_index;\n+\tfdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);\n+\tfdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;\n+\tfdir_fltr_ctx.toq_prio = 3;\n+\tfdir_fltr_ctx.pcmd = add ? 
ICE_FXD_FLTR_QW1_PCMD_ADD :\n+\t\tICE_FXD_FLTR_QW1_PCMD_REMOVE;\n+\tfdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;\n+\tfdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;\n+\tfdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;\n+\tfdir_fltr_ctx.fdid_prio = 3;\n+\tfdir_fltr_ctx.desc_prof = 1;\n+\tfdir_fltr_ctx.desc_prof_prio = 3;\n+\tice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);\n+}\n+\n /**\n * ice_alloc_fd_res_cntr - obtain counter resource for FD type\n * @hw: pointer to the hardware structure\n@@ -64,6 +319,150 @@ int ice_get_fdir_cnt_all(struct ice_hw *hw)\n \treturn hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort;\n }\n \n+/**\n+ * ice_pkt_insert_u16 - insert a be16 value into a memory buffer.\n+ * @pkt: packet buffer\n+ * @offset: offset into buffer\n+ * @data: 16 bit value to convert and insert into pkt at offset\n+ */\n+static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data)\n+{\n+\tmemcpy(pkt + offset, &data, sizeof(data));\n+}\n+\n+/**\n+ * ice_pkt_insert_u32 - insert a be32 value into a memory buffer.\n+ * @pkt: packet buffer\n+ * @offset: offset into buffer\n+ * @data: 32 bit value to convert and insert into pkt at offset\n+ */\n+static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)\n+{\n+\tmemcpy(pkt + offset, &data, sizeof(data));\n+}\n+\n+/**\n+ * ice_fdir_get_gen_prgm_pkt - generate a training packet\n+ * @hw: pointer to the hardware structure\n+ * @input: flow director filter data structure\n+ * @pkt: pointer to return filter packet\n+ * @frag: generate a fragment packet\n+ * @tun: true implies generate a tunnel packet\n+ */\n+enum ice_status\n+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,\n+\t\t\t u8 *pkt, bool frag, bool tun)\n+{\n+\tenum ice_fltr_ptype flow;\n+\tu16 tnl_port;\n+\tu8 *loc;\n+\tu16 idx;\n+\n+\tif (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {\n+\t\tswitch (input->ip.proto) {\n+\t\tcase IPPROTO_TCP:\n+\t\t\tflow = ICE_FLTR_PTYPE_NONF_IPV4_TCP;\n+\t\t\tbreak;\n+\t\tcase IPPROTO_UDP:\n+\t\t\tflow = ICE_FLTR_PTYPE_NONF_IPV4_UDP;\n+\t\t\tbreak;\n+\t\tcase IPPROTO_SCTP:\n+\t\t\tflow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;\n+\t\t\tbreak;\n+\t\tcase IPPROTO_IP:\n+\t\t\tflow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn ICE_ERR_PARAM;\n+\t\t}\n+\t} else {\n+\t\tflow = input->flow_type;\n+\t}\n+\n+\tfor (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++)\n+\t\tif (ice_fdir_pkt[idx].flow == flow)\n+\t\t\tbreak;\n+\tif (idx == ICE_FDIR_NUM_PKT)\n+\t\treturn ICE_ERR_PARAM;\n+\tif (!tun) {\n+\t\tmemcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);\n+\t\tloc = pkt;\n+\t} else {\n+\t\tif (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))\n+\t\t\treturn ICE_ERR_DOES_NOT_EXIST;\n+\t\tif (!ice_fdir_pkt[idx].tun_pkt)\n+\t\t\treturn ICE_ERR_PARAM;\n+\t\tmemcpy(pkt, ice_fdir_pkt[idx].tun_pkt,\n+\t\t ice_fdir_pkt[idx].tun_pkt_len);\n+\t\tice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,\n+\t\t\t\t htons(tnl_port));\n+\t\tloc = &pkt[ICE_FDIR_TUN_PKT_OFF];\n+\t}\n+\n+\t/* Reverse the src and dst, since the HW expects them to be from Tx\n+\t * perspective. 
The input from user is from Rx filter perspective.\n+\t */\n+\tswitch (flow) {\n+\tcase ICE_FLTR_PTYPE_NONF_IPV4_TCP:\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,\n+\t\t\t\t input->ip.src_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET,\n+\t\t\t\t input->ip.src_port);\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,\n+\t\t\t\t input->ip.dst_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,\n+\t\t\t\t input->ip.dst_port);\n+\t\tif (frag)\n+\t\t\tloc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;\n+\t\tbreak;\n+\tcase ICE_FLTR_PTYPE_NONF_IPV4_UDP:\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,\n+\t\t\t\t input->ip.src_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET,\n+\t\t\t\t input->ip.src_port);\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,\n+\t\t\t\t input->ip.dst_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,\n+\t\t\t\t input->ip.dst_port);\n+\t\tbreak;\n+\tcase ICE_FLTR_PTYPE_NONF_IPV4_SCTP:\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,\n+\t\t\t\t input->ip.src_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET,\n+\t\t\t\t input->ip.src_port);\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,\n+\t\t\t\t input->ip.dst_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,\n+\t\t\t\t input->ip.dst_port);\n+\t\tbreak;\n+\tcase ICE_FLTR_PTYPE_NONF_IPV4_OTHER:\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,\n+\t\t\t\t input->ip.src_ip);\n+\t\tice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,\n+\t\t\t\t input->ip.dst_ip);\n+\t\tice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_fdir_has_frag - does flow type have 2 ptypes\n+ * @flow: flow ptype\n+ *\n+ * returns true is there is a fragment packet for this ptype\n+ */\n+bool ice_fdir_has_frag(enum ice_fltr_ptype flow)\n+{\n+\tif (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)\n+\t\treturn true;\n+\telse\n+\t\treturn false;\n+}\n+\n /**\n * ice_fdir_find_by_idx - find filter with idx\n * @hw: pointer to hardware structure\n@@ -85,3 +484,117 @@ ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx)\n \t}\n \treturn NULL;\n }\n+\n+/**\n+ * ice_fdir_list_add_fltr - add a new node to the flow director filter list\n+ * @hw: hardware structure\n+ * @fltr: filter node to add to structure\n+ */\n+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)\n+{\n+\tstruct ice_fdir_fltr *rule, *parent = NULL;\n+\n+\tlist_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {\n+\t\t/* rule ID found or pass its spot in the list */\n+\t\tif (rule->fltr_id >= fltr->fltr_id)\n+\t\t\tbreak;\n+\t\tparent = rule;\n+\t}\n+\n+\tif (parent)\n+\t\tlist_add(&fltr->fltr_node, &parent->fltr_node);\n+\telse\n+\t\tlist_add(&fltr->fltr_node, &hw->fdir_list_head);\n+}\n+\n+/**\n+ * ice_fdir_update_cntrs - increment / decrement filter counter\n+ * @hw: pointer to hardware structure\n+ * @flow: filter flow type\n+ * @add: true implies filters added\n+ */\n+void\n+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add)\n+{\n+\tint incr;\n+\n+\tincr = add ? 
1 : -1;\n+\thw->fdir_active_fltr += incr;\n+\n+\tif (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX)\n+\t\tice_debug(hw, ICE_DBG_SW, \"Unknown filter type %d\\n\", flow);\n+\telse\n+\t\thw->fdir_fltr_cnt[flow] += incr;\n+}\n+\n+/**\n+ * ice_fdir_comp_rules - compare 2 filters\n+ * @a: a Flow Director filter data structure\n+ * @b: a Flow Director filter data structure\n+ *\n+ * Returns true if the filters match\n+ */\n+static bool\n+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)\n+{\n+\tenum ice_fltr_ptype flow_type = a->flow_type;\n+\n+\t/* The calling function already checks that the two filters have the\n+\t * same flow_type.\n+\t */\n+\tif (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||\n+\t flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||\n+\t flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {\n+\t\tif (a->ip.dst_ip == b->ip.dst_ip &&\n+\t\t a->ip.src_ip == b->ip.src_ip &&\n+\t\t a->ip.dst_port == b->ip.dst_port &&\n+\t\t a->ip.src_port == b->ip.src_port)\n+\t\t\treturn true;\n+\t} else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {\n+\t\tif (a->ip.dst_ip == b->ip.dst_ip &&\n+\t\t a->ip.src_ip == b->ip.src_ip &&\n+\t\t a->ip.l4_header == b->ip.l4_header &&\n+\t\t a->ip.proto == b->ip.proto &&\n+\t\t a->ip.ip_ver == b->ip.ip_ver &&\n+\t\t a->ip.tos == b->ip.tos)\n+\t\t\treturn true;\n+\t}\n+\n+\treturn false;\n+}\n+\n+/**\n+ * ice_fdir_is_dup_fltr - test if filter is already in list for PF\n+ * @hw: hardware data structure\n+ * @input: Flow Director filter data structure\n+ *\n+ * Returns true if the filter is found in the list\n+ */\n+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)\n+{\n+\tstruct ice_fdir_fltr *rule;\n+\tbool ret = false;\n+\n+\tlist_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {\n+\t\tenum ice_fltr_ptype flow_type;\n+\n+\t\tif (rule->flow_type != input->flow_type)\n+\t\t\tcontinue;\n+\n+\t\tflow_type = input->flow_type;\n+\t\tif (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||\n+\t\t flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||\n+\t\t flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||\n+\t\t flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)\n+\t\t\tret = ice_fdir_comp_rules(rule, input);\n+\t\tif (ret) {\n+\t\t\tif (rule->fltr_id == input->fltr_id &&\n+\t\t\t rule->q_index != input->q_index)\n+\t\t\t\tret = false;\n+\t\t\telse\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h\nindex d37171d1a306..cd02ba59603c 100644\n--- a/drivers/net/ethernet/intel/ice/ice_fdir.h\n+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h\n@@ -4,11 +4,70 @@\n #ifndef _ICE_FDIR_H_\n #define _ICE_FDIR_H_\n \n+#define ICE_FDIR_TUN_PKT_OFF\t\t50\n+#define ICE_FDIR_MAX_RAW_PKT_SIZE\t(512 + ICE_FDIR_TUN_PKT_OFF)\n+\n+/* macros for offsets into packets for flow director programming */\n+#define ICE_IPV4_SRC_ADDR_OFFSET\t26\n+#define ICE_IPV4_DST_ADDR_OFFSET\t30\n+#define ICE_IPV4_TCP_SRC_PORT_OFFSET\t34\n+#define ICE_IPV4_TCP_DST_PORT_OFFSET\t36\n+#define ICE_IPV4_UDP_SRC_PORT_OFFSET\t34\n+#define ICE_IPV4_UDP_DST_PORT_OFFSET\t36\n+#define ICE_IPV4_SCTP_SRC_PORT_OFFSET\t34\n+#define ICE_IPV4_SCTP_DST_PORT_OFFSET\t36\n+#define ICE_IPV4_PROTO_OFFSET\t\t23\n+#define ICE_IPV6_SRC_ADDR_OFFSET\t22\n+#define ICE_IPV6_DST_ADDR_OFFSET\t38\n+#define ICE_IPV6_TCP_SRC_PORT_OFFSET\t54\n+#define ICE_IPV6_TCP_DST_PORT_OFFSET\t56\n+#define ICE_IPV6_UDP_SRC_PORT_OFFSET\t54\n+#define ICE_IPV6_UDP_DST_PORT_OFFSET\t56\n+#define 
ICE_IPV6_SCTP_SRC_PORT_OFFSET\t54\n+#define ICE_IPV6_SCTP_DST_PORT_OFFSET\t56\n+/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF\n+ * requests that the packet not be fragmented. MF indicates that a packet has\n+ * been fragmented.\n+ */\n+#define ICE_FDIR_IPV4_PKT_FLAG_DF\t\t0x20\n+\n enum ice_fltr_prgm_desc_dest {\n \tICE_FLTR_PRGM_DESC_DEST_DROP_PKT,\n \tICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,\n };\n \n+enum ice_fltr_prgm_desc_fd_status {\n+\tICE_FLTR_PRGM_DESC_FD_STATUS_NONE,\n+\tICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID,\n+};\n+\n+/* Flow Director (FD) Filter Programming descriptor */\n+struct ice_fd_fltr_desc_ctx {\n+\tu32 fdid;\n+\tu16 qindex;\n+\tu16 cnt_index;\n+\tu16 fd_vsi;\n+\tu16 flex_val;\n+\tu8 comp_q;\n+\tu8 comp_report;\n+\tu8 fd_space;\n+\tu8 cnt_ena;\n+\tu8 evict_ena;\n+\tu8 toq;\n+\tu8 toq_prio;\n+\tu8 dpu_recipe;\n+\tu8 drop;\n+\tu8 flex_prio;\n+\tu8 flex_mdid;\n+\tu8 dtype;\n+\tu8 pcmd;\n+\tu8 desc_prof_prio;\n+\tu8 desc_prof;\n+\tu8 swap;\n+\tu8 fdid_prio;\n+\tu8 fdid_mdid;\n+};\n+\n struct ice_fdir_v4 {\n \t__be32 dst_ip;\n \t__be32 src_ip;\n@@ -47,13 +106,33 @@ struct ice_fdir_fltr {\n \tu32 fltr_id;\n };\n \n+/* Dummy packet filter definition structure */\n+struct ice_fdir_base_pkt {\n+\tenum ice_fltr_ptype flow;\n+\tu16 pkt_len;\n+\tconst u8 *pkt;\n+\tu16 tun_pkt_len;\n+\tconst u8 *tun_pkt;\n+};\n+\n enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);\n enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);\n enum ice_status\n ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);\n enum ice_status\n ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);\n+void\n+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,\n+\t\t struct ice_fltr_desc *fdesc, bool add);\n+enum ice_status\n+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,\n+\t\t\t u8 *pkt, bool frag, bool tun);\n int ice_get_fdir_cnt_all(struct ice_hw *hw);\n+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);\n+bool ice_fdir_has_frag(enum ice_fltr_ptype flow);\n struct ice_fdir_fltr *\n ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx);\n+void\n+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add);\n+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);\n #endif /* _ICE_FDIR_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c\nindex fe2f04f706e7..16d2f599bd70 100644\n--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c\n+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c\n@@ -1632,6 +1632,34 @@ ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,\n \treturn false;\n }\n \n+/**\n+ * ice_get_open_tunnel_port - retrieve an open tunnel port\n+ * @hw: pointer to the HW structure\n+ * @type: tunnel type (TNL_ALL will return any open port)\n+ * @port: returns open port\n+ */\n+bool\n+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,\n+\t\t\t u16 *port)\n+{\n+\tbool res = false;\n+\tu16 i;\n+\n+\tmutex_lock(&hw->tnl_lock);\n+\n+\tfor (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)\n+\t\tif (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&\n+\t\t (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {\n+\t\t\t*port = hw->tnl.tbl[i].port;\n+\t\t\tres = true;\n+\t\t\tbreak;\n+\t\t}\n+\n+\tmutex_unlock(&hw->tnl_lock);\n+\n+\treturn res;\n+}\n+\n /**\n * ice_create_tunnel\n * @hw: pointer to the HW 
structure\n@@ -2332,6 +2360,12 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,\n \tu16 off;\n \tu8 i;\n \n+\t/* For FD, we don't want to re-use a existed profile with the same\n+\t * field vector and mask. This will cause rule interference.\n+\t */\n+\tif (blk == ICE_BLK_FD)\n+\t\treturn ICE_ERR_DOES_NOT_EXIST;\n+\n \tfor (i = 0; i < (u8)es->count; i++) {\n \t\toff = i * es->fvw;\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h\nindex 70db213c9fe3..568ea519af51 100644\n--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h\n+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h\n@@ -18,6 +18,9 @@\n \n #define ICE_PKG_CNT 4\n \n+bool\n+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,\n+\t\t\t u16 *port);\n enum ice_status\n ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);\n enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\nindex dd96409673d4..30c28a4bcf2f 100644\n--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h\n@@ -290,6 +290,9 @@\n #define GL_PWR_MODE_CTL\t\t\t\t0x000B820C\n #define GL_PWR_MODE_CTL_CAR_MAX_BW_S\t\t30\n #define GL_PWR_MODE_CTL_CAR_MAX_BW_M\t\tICE_M(0x3, 30)\n+#define GLQF_FD_CNT\t\t\t\t0x00460018\n+#define GLQF_FD_CNT_FD_BCNT_S\t\t\t16\n+#define GLQF_FD_CNT_FD_BCNT_M\t\t\tICE_M(0x7FFF, 16)\n #define GLQF_FD_SIZE\t\t\t\t0x00460010\n #define GLQF_FD_SIZE_FD_GSIZE_S\t\t\t0\n #define GLQF_FD_SIZE_FD_GSIZE_M\t\t\tICE_M(0x7FFF, 0)\n@@ -355,6 +358,9 @@\n #define GLV_TEPC(_VSI)\t\t\t\t(0x00312000 + ((_VSI) * 4))\n #define GLV_UPRCL(_i)\t\t\t\t(0x003B2000 + ((_i) * 8))\n #define GLV_UPTCL(_i)\t\t\t\t(0x0030A000 + ((_i) * 8))\n+#define VSIQF_FD_CNT(_VSI)\t\t\t(0x00464000 + ((_VSI) * 4))\n+#define VSIQF_FD_CNT_FD_GCNT_S\t\t\t0\n+#define VSIQF_FD_CNT_FD_GCNT_M\t\t\tICE_M(0x3FFF, 0)\n #define VSIQF_HKEY_MAX_INDEX\t\t\t12\n #define VSIQF_HLUT_MAX_INDEX\t\t\t15\n #define VFINT_DYN_CTLN(_i)\t\t\t(0x00003800 + ((_i) * 4))\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\nindex 5d61acdec7ed..bd2cd3435768 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h\n@@ -40,6 +40,104 @@ union ice_32byte_rx_desc {\n \t} wb; /* writeback */\n };\n \n+struct ice_fltr_desc {\n+\t__le64 qidx_compq_space_stat;\n+\t__le64 dtype_cmd_vsi_fdid;\n+};\n+\n+#define ICE_FXD_FLTR_QW0_QINDEX_S\t0\n+#define ICE_FXD_FLTR_QW0_QINDEX_M\t(0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)\n+#define ICE_FXD_FLTR_QW0_COMP_Q_S\t11\n+#define ICE_FXD_FLTR_QW0_COMP_Q_M\tBIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S)\n+#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW0_COMP_REPORT_S\t12\n+#define ICE_FXD_FLTR_QW0_COMP_REPORT_M\t\\\n+\t\t\t\t(0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)\n+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW0_FD_SPACE_S\t14\n+#define ICE_FXD_FLTR_QW0_FD_SPACE_M\t(0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)\n+#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST\t\t0x2ULL\n+\n+#define ICE_FXD_FLTR_QW0_STAT_CNT_S\t16\n+#define ICE_FXD_FLTR_QW0_STAT_CNT_M\t\\\n+\t\t\t\t(0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S)\n+#define ICE_FXD_FLTR_QW0_STAT_ENA_S\t29\n+#define ICE_FXD_FLTR_QW0_STAT_ENA_M\t(0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S)\n+#define 
ICE_FXD_FLTR_QW0_STAT_ENA_PKTS\t\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW0_EVICT_ENA_S\t31\n+#define ICE_FXD_FLTR_QW0_EVICT_ENA_M\tBIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S)\n+#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE\t0x0ULL\n+#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE\t\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW0_TO_Q_S\t\t32\n+#define ICE_FXD_FLTR_QW0_TO_Q_M\t\t(0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S)\n+#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S\t35\n+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M\t(0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S)\n+#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S\t38\n+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_M\t\\\n+\t\t\t(0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S)\n+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW0_DROP_S\t\t40\n+#define ICE_FXD_FLTR_QW0_DROP_M\t\tBIT_ULL(ICE_FXD_FLTR_QW0_DROP_S)\n+#define ICE_FXD_FLTR_QW0_DROP_NO\t0x0ULL\n+#define ICE_FXD_FLTR_QW0_DROP_YES\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW0_FLEX_PRI_S\t41\n+#define ICE_FXD_FLTR_QW0_FLEX_PRI_M\t(0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S)\n+#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW0_FLEX_MDID_S\t44\n+#define ICE_FXD_FLTR_QW0_FLEX_MDID_M\t(0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S)\n+#define ICE_FXD_FLTR_QW0_FLEX_MDID0\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW0_FLEX_VAL_S\t48\n+#define ICE_FXD_FLTR_QW0_FLEX_VAL_M\t\\\n+\t\t\t\t(0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S)\n+#define ICE_FXD_FLTR_QW0_FLEX_VAL0\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW1_DTYPE_S\t0\n+#define ICE_FXD_FLTR_QW1_DTYPE_M\t(0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S)\n+#define ICE_FXD_FLTR_QW1_PCMD_S\t\t4\n+#define ICE_FXD_FLTR_QW1_PCMD_M\t\tBIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S)\n+#define ICE_FXD_FLTR_QW1_PCMD_ADD\t0x0ULL\n+#define ICE_FXD_FLTR_QW1_PCMD_REMOVE\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW1_PROF_PRI_S\t5\n+#define ICE_FXD_FLTR_QW1_PROF_PRI_M\t(0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S)\n+#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW1_PROF_S\t\t8\n+#define ICE_FXD_FLTR_QW1_PROF_M\t\t(0x3FULL << ICE_FXD_FLTR_QW1_PROF_S)\n+#define ICE_FXD_FLTR_QW1_PROF_ZERO\t0x0ULL\n+\n+#define ICE_FXD_FLTR_QW1_FD_VSI_S\t14\n+#define ICE_FXD_FLTR_QW1_FD_VSI_M\t(0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S)\n+#define ICE_FXD_FLTR_QW1_SWAP_S\t\t24\n+#define ICE_FXD_FLTR_QW1_SWAP_M\t\tBIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S)\n+#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET\t0x0ULL\n+#define ICE_FXD_FLTR_QW1_SWAP_SET\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW1_FDID_PRI_S\t25\n+#define ICE_FXD_FLTR_QW1_FDID_PRI_M\t(0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)\n+#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE\t0x1ULL\n+\n+#define ICE_FXD_FLTR_QW1_FDID_MDID_S\t28\n+#define ICE_FXD_FLTR_QW1_FDID_MDID_M\t(0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)\n+#define ICE_FXD_FLTR_QW1_FDID_MDID_FD\t0x05ULL\n+\n+#define ICE_FXD_FLTR_QW1_FDID_S\t\t32\n+#define ICE_FXD_FLTR_QW1_FDID_M\t\t\\\n+\t\t\t(0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)\n+#define ICE_FXD_FLTR_QW1_FDID_ZERO\t0x0ULL\n+\n struct ice_rx_ptype_decoded {\n \tu32 ptype:10;\n \tu32 known:1;\n@@ -346,6 +444,7 @@ struct ice_tx_desc {\n enum ice_tx_desc_dtype_value {\n \tICE_TX_DESC_DTYPE_DATA\t\t= 0x0,\n \tICE_TX_DESC_DTYPE_CTX\t\t= 0x1,\n+\tICE_TX_DESC_DTYPE_FLTR_PROG\t= 0x8,\n \t/* DESC_DONE - HW has completed write-back of descriptor */\n \tICE_TX_DESC_DTYPE_DESC_DONE\t= 0xF,\n };\n@@ -357,12 +456,14 @@ enum ice_tx_desc_cmd_bits {\n \tICE_TX_DESC_CMD_EOP\t\t\t= 0x0001,\n \tICE_TX_DESC_CMD_RS\t\t\t= 0x0002,\n 
\tICE_TX_DESC_CMD_IL2TAG1\t\t\t= 0x0008,\n+\tICE_TX_DESC_CMD_DUMMY\t\t\t= 0x0010,\n \tICE_TX_DESC_CMD_IIPT_IPV6\t\t= 0x0020,\n \tICE_TX_DESC_CMD_IIPT_IPV4\t\t= 0x0040,\n \tICE_TX_DESC_CMD_IIPT_IPV4_CSUM\t\t= 0x0060,\n \tICE_TX_DESC_CMD_L4T_EOFT_TCP\t\t= 0x0100,\n \tICE_TX_DESC_CMD_L4T_EOFT_SCTP\t\t= 0x0200,\n \tICE_TX_DESC_CMD_L4T_EOFT_UDP\t\t= 0x0300,\n+\tICE_TX_DESC_CMD_RE\t\t\t= 0x0400,\n };\n \n #define ICE_TXD_QW1_OFFSET_S\t16\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c\nindex df802f0d1938..4b1a7ab147f8 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.c\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c\n@@ -16,6 +16,88 @@\n #define ICE_RX_HDR_SIZE\t\t256\n \n #define FDIR_DESC_RXDID 0x40\n+#define ICE_FDIR_CLEAN_DELAY 10\n+\n+/**\n+ * ice_prgm_fdir_fltr - Program a Flow Director filter\n+ * @vsi: VSI to send dummy packet\n+ * @fdir_desc: flow director descriptor\n+ * @raw_packet: allocated buffer for flow director\n+ */\n+int\n+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,\n+\t\t u8 *raw_packet)\n+{\n+\tstruct ice_tx_buf *tx_buf, *first;\n+\tstruct ice_fltr_desc *f_desc;\n+\tstruct ice_tx_desc *tx_desc;\n+\tstruct ice_ring *tx_ring;\n+\tstruct device *dev;\n+\tdma_addr_t dma;\n+\tu32 td_cmd;\n+\tu16 i;\n+\n+\t/* VSI and Tx ring */\n+\tif (!vsi)\n+\t\treturn -ENOENT;\n+\ttx_ring = vsi->tx_rings[0];\n+\tif (!tx_ring || !tx_ring->desc)\n+\t\treturn -ENOENT;\n+\tdev = tx_ring->dev;\n+\n+\t/* we are using two descriptors to add/del a filter and we can wait */\n+\tfor (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {\n+\t\tif (!i)\n+\t\t\treturn -EAGAIN;\n+\t\tmsleep_interruptible(1);\n+\t}\n+\n+\tdma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,\n+\t\t\t DMA_TO_DEVICE);\n+\n+\tif (dma_mapping_error(dev, dma))\n+\t\treturn -EINVAL;\n+\n+\t/* grab the next descriptor */\n+\ti = tx_ring->next_to_use;\n+\tfirst = &tx_ring->tx_buf[i];\n+\tf_desc = ICE_TX_FDIRDESC(tx_ring, i);\n+\tmemcpy(f_desc, fdir_desc, sizeof(*f_desc));\n+\n+\ti++;\n+\ti = (i < tx_ring->count) ? i : 0;\n+\ttx_desc = ICE_TX_DESC(tx_ring, i);\n+\ttx_buf = &tx_ring->tx_buf[i];\n+\n+\ti++;\n+\ttx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0;\n+\n+\tmemset(tx_buf, 0, sizeof(*tx_buf));\n+\tdma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);\n+\tdma_unmap_addr_set(tx_buf, dma, dma);\n+\n+\ttx_desc->buf_addr = cpu_to_le64(dma);\n+\ttd_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |\n+\t\t ICE_TX_DESC_CMD_RE;\n+\n+\ttx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;\n+\ttx_buf->raw_buf = raw_packet;\n+\n+\ttx_desc->cmd_type_offset_bsz =\n+\t\tice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);\n+\n+\t/* Force memory write to complete before letting h/w know\n+\t * there are new descriptors to fetch.\n+\t */\n+\twmb();\n+\n+\t/* mark the data descriptor to be watched */\n+\tfirst->next_to_watch = tx_desc;\n+\n+\twritel(tx_ring->next_to_use, tx_ring->tail);\n+\n+\treturn 0;\n+}\n \n /**\n * ice_unmap_and_free_tx_buf - Release a Tx buffer\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex 2209583c993e..7c4030caeea4 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -380,6 +380,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring);\n void ice_free_tx_ring(struct ice_ring *tx_ring);\n void ice_free_rx_ring(struct ice_ring *rx_ring);\n int ice_napi_poll(struct napi_struct *napi, int budget);\n+int\n+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,\n+\t\t u8 *raw_packet);\n int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);\n void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);\n #endif /* _ICE_TXRX_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 5291b687a75f..6d574ddb2b1e 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -632,6 +632,12 @@ struct ice_hw {\n \tstruct mutex fdir_fltr_lock;\t/* protect Flow Director */\n \tstruct list_head fdir_list_head;\n \n+\t/* Book-keeping of side-band filter count per flow-type.\n+\t * This is used to detect and handle input set changes for\n+\t * respective flow-type.\n+\t */\n+\tu16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX];\n+\n \tstruct ice_fd_hw_prof **fdir_prof;\n \tDECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);\n \tstruct mutex rss_locks;\t/* protect RSS configuration */\n", "prefixes": [ "S42", "3/7" ] }
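
The Flow Director programming-descriptor defines added above (ICE_FXD_FLTR_QW0_* / ICE_FXD_FLTR_QW1_* in ice_lan_tx_rx.h) are easier to follow with a worked example. Below is a minimal standalone C sketch, not the driver's own ice_fdir_get_prgm_desc(): it composes the two 64-bit words of a filter programming descriptor for an "add, direct to queue" filter from a subset of the macros copied from the patch. Everything else in it (struct fltr_desc, build_add_desc(), the sample values) is illustrative only, and host-endian uint64_t stands in for the driver's __le64/cpu_to_le64() handling before the descriptor is handed to something like ice_prgm_fdir_fltr() on the control Tx queue.

/*
 * Sketch only: demonstrates how the QW0/QW1 shift/mask defines from this
 * patch pack the two quad-words of a Flow Director programming descriptor.
 * Names other than the copied ICE_* macros are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* Subset of the defines added by this patch (values copied verbatim). */
#define ICE_FXD_FLTR_QW0_QINDEX_S		0
#define ICE_FXD_FLTR_QW0_QINDEX_M		(0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)
#define ICE_FXD_FLTR_QW0_COMP_REPORT_S		12
#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL	0x1ULL
#define ICE_FXD_FLTR_QW0_FD_SPACE_S		14
#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST	0x2ULL
#define ICE_FXD_FLTR_QW1_DTYPE_S		0
#define ICE_FXD_FLTR_QW1_PCMD_S			4
#define ICE_FXD_FLTR_QW1_PCMD_ADD		0x0ULL
#define ICE_FXD_FLTR_QW1_FD_VSI_S		14
#define ICE_FXD_FLTR_QW1_FDID_PRI_S		25
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE		0x1ULL
#define ICE_FXD_FLTR_QW1_FDID_S			32
#define ICE_TX_DESC_DTYPE_FLTR_PROG		0x8

/* Stand-in for struct ice_fltr_desc: two quad-words posted to the Tx queue. */
struct fltr_desc {
	uint64_t qidx_compq_space_stat;	/* QW0 */
	uint64_t dtype_cmd_vsi_fdid;	/* QW1 */
};

/*
 * Build an "add" descriptor that steers matching packets to Rx queue
 * @qindex on VSI @vsi_num and reports software filter ID @fdid on match.
 */
static void build_add_desc(struct fltr_desc *d, uint16_t qindex,
			   uint16_t vsi_num, uint32_t fdid)
{
	/* QW0: target queue, completion reporting, guaranteed/best-effort space */
	d->qidx_compq_space_stat =
		(((uint64_t)qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
		 ICE_FXD_FLTR_QW0_QINDEX_M) |
		(ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL <<
		 ICE_FXD_FLTR_QW0_COMP_REPORT_S) |
		(ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST <<
		 ICE_FXD_FLTR_QW0_FD_SPACE_S);

	/* QW1: descriptor type, add/remove command, VSI, FDID priority and value */
	d->dtype_cmd_vsi_fdid =
		((uint64_t)ICE_TX_DESC_DTYPE_FLTR_PROG << ICE_FXD_FLTR_QW1_DTYPE_S) |
		(ICE_FXD_FLTR_QW1_PCMD_ADD << ICE_FXD_FLTR_QW1_PCMD_S) |
		((uint64_t)vsi_num << ICE_FXD_FLTR_QW1_FD_VSI_S) |
		(ICE_FXD_FLTR_QW1_FDID_PRI_ONE << ICE_FXD_FLTR_QW1_FDID_PRI_S) |
		((uint64_t)fdid << ICE_FXD_FLTR_QW1_FDID_S);
}

int main(void)
{
	struct fltr_desc d;

	build_add_desc(&d, 3, 5, 0x1234);
	printf("QW0: 0x%016llx\nQW1: 0x%016llx\n",
	       (unsigned long long)d.qidx_compq_space_stat,
	       (unsigned long long)d.dtype_cmd_vsi_fdid);
	return 0;
}

In the driver itself the two words are stored little-endian in struct ice_fltr_desc and the filled descriptor is followed by a dummy packet buffer: ice_prgm_fdir_fltr() in the hunk above copies the descriptor into the control Tx ring, maps the raw packet for DMA, sets ICE_TX_DESC_CMD_DUMMY | ICE_TX_DESC_CMD_RE on the data descriptor, and bumps the tail so hardware fetches both.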