Patch Detail

The patch detail endpoint accepts three HTTP methods:

GET: Show a patch.
PATCH: Update a patch (partial update; only the fields supplied in the request body are changed).
PUT: Update a patch (full update).
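For orientation, here is a minimal client-side sketch of both operations using Python's requests library. The token placeholder and the state value below are illustrative assumptions, not values taken from this page; write operations require an API token for an account with sufficient (maintainer) permissions on the project.

import requests

BASE = "http://patchwork.ozlabs.org/api"

# GET: reads require no authentication.
patch = requests.get(f"{BASE}/patches/706704/").json()
print(patch["name"], "-", patch["state"])

# PATCH: send only the fields to change. The token below is a
# placeholder, not a real credential.
resp = requests.patch(
    f"{BASE}/patches/706704/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "changes-requested"},
)
resp.raise_for_status()

A concrete GET against this instance, and the JSON it returns, follows.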
GET /api/patches/706704/?format=api
{ "id": 706704, "url": "http://patchwork.ozlabs.org/api/patches/706704/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20161217134000.31640-4-bjorn.topel@gmail.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20161217134000.31640-4-bjorn.topel@gmail.com>", "list_archive_url": null, "date": "2016-12-17T13:39:59", "name": "[v4,3/4] i40e: Add XDP_TX support", "commit_ref": null, "pull_url": null, "state": "changes-requested", "archived": false, "hash": "36ab3fefcf9e270dee911fa067f3983addccaed7", "submitter": { "id": 70569, "url": "http://patchwork.ozlabs.org/api/people/70569/?format=api", "name": "Björn Töpel", "email": "bjorn.topel@gmail.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20161217134000.31640-4-bjorn.topel@gmail.com/mbox/", "series": [], "comments": "http://patchwork.ozlabs.org/api/patches/706704/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/706704/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Received": [ "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3tgpGS0DWYz9t25\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSun, 18 Dec 2016 00:40:31 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 814F184680;\n\tSat, 17 Dec 2016 13:40:30 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id o8a26d5YE8P2; Sat, 17 Dec 2016 13:40:21 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id A612C84CC6;\n\tSat, 17 Dec 2016 13:40:21 +0000 (UTC)", "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\tby ash.osuosl.org (Postfix) with ESMTP id 663F51C0F9B\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 17 Dec 2016 13:40:20 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 5EB0685BF0\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 17 Dec 2016 13:40:20 +0000 (UTC)", "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id e0d7OmJKOs01 for <intel-wired-lan@lists.osuosl.org>;\n\tSat, 17 Dec 2016 13:40:19 +0000 (UTC)", "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby fraxinus.osuosl.org (Postfix) with ESMTPS id F0A9A85A7C\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 17 Dec 2016 13:40:18 +0000 (UTC)", "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby fmsmga104.fm.intel.com with ESMTP; 17 Dec 2016 05:40:18 
-0800", "from jabolger-mobl1.ger.corp.intel.com (HELO\n\tbtopel-mobl1.intel.com) ([10.252.31.3])\n\tby fmsmga001.fm.intel.com with ESMTP; 17 Dec 2016 05:40:16 -0800" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "from auto-whitelisted by SQLgrey-1.7.6", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos; i=\"5.33,363,1477983600\"; d=\"scan'208\";\n\ta=\"1083168133\"", "From": "=?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= <bjorn.topel@gmail.com>", "To": "jeffrey.t.kirsher@intel.com,\n\tintel-wired-lan@lists.osuosl.org", "Date": "Sat, 17 Dec 2016 14:39:59 +0100", "Message-Id": "<20161217134000.31640-4-bjorn.topel@gmail.com>", "X-Mailer": "git-send-email 2.9.3", "In-Reply-To": "<20161217134000.31640-1-bjorn.topel@gmail.com>", "References": "<20161217134000.31640-1-bjorn.topel@gmail.com>", "MIME-Version": "1.0", "Cc": "daniel@iogearbox.net,\n\t=?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= <bjorn.topel@intel.com>,\n\tmagnus.karlsson@intel.com", "Subject": "[Intel-wired-lan] [PATCH v4 3/4] i40e: Add XDP_TX support", "X-BeenThere": "intel-wired-lan@lists.osuosl.org", "X-Mailman-Version": "2.1.18-1", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>", "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>", "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"utf-8\"", "Content-Transfer-Encoding": "base64", "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>" }, "content": "From: Björn Töpel <bjorn.topel@intel.com>\n\nThis patch adds proper XDP_TX support.\n\nSigned-off-by: Björn Töpel <bjorn.topel@intel.com>\n---\n drivers/net/ethernet/intel/i40e/i40e.h | 5 +\n drivers/net/ethernet/intel/i40e/i40e_main.c | 294 +++++++++++++++++++++++-----\n drivers/net/ethernet/intel/i40e/i40e_txrx.c | 255 +++++++++++++++++++++---\n drivers/net/ethernet/intel/i40e/i40e_txrx.h | 5 +\n 4 files changed, 478 insertions(+), 81 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h\nindex 5382d4782396..1b0fadaf6fc9 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e.h\n+++ b/drivers/net/ethernet/intel/i40e/i40e.h\n@@ -589,6 +589,10 @@ struct i40e_vsi {\n \tstruct i40e_ring **rx_rings;\n \tstruct i40e_ring **tx_rings;\n \n+\t/* The XDP rings are Tx only, and follows the count of the\n+\t * regular rings, i.e. 
alloc_queue_pairs/num_queue_pairs\n+\t */\n+\tstruct i40e_ring **xdp_rings;\n \tbool xdp_enabled;\n \n \tu32 active_filters;\n@@ -666,6 +670,7 @@ struct i40e_q_vector {\n \n \tstruct i40e_ring_container rx;\n \tstruct i40e_ring_container tx;\n+\tstruct i40e_ring_container xdp;\n \n \tu8 num_ringpairs;\t/* total number of ring pairs in vector */\n \ndiff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c\nindex 86bd2131d2bc..efb95fb851f4 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_main.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c\n@@ -107,6 +107,18 @@ MODULE_VERSION(DRV_VERSION);\n static struct workqueue_struct *i40e_wq;\n \n /**\n+ * i40e_alloc_queue_pairs_xdp_vsi - required # of XDP queue pairs\n+ * @vsi: pointer to a vsi\n+ **/\n+static u16 i40e_alloc_queue_pairs_xdp_vsi(const struct i40e_vsi *vsi)\n+{\n+\tif (i40e_enabled_xdp_vsi(vsi))\n+\t\treturn vsi->alloc_queue_pairs;\n+\n+\treturn 0;\n+}\n+\n+/**\n * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code\n * @hw: pointer to the HW structure\n * @mem: ptr to mem struct to fill out\n@@ -2886,6 +2898,12 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)\n \tfor (i = 0; i < vsi->num_queue_pairs && !err; i++)\n \t\terr = i40e_setup_tx_descriptors(vsi->tx_rings[i]);\n \n+\tif (!i40e_enabled_xdp_vsi(vsi))\n+\t\treturn err;\n+\n+\tfor (i = 0; i < vsi->num_queue_pairs && !err; i++)\n+\t\terr = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);\n+\n \treturn err;\n }\n \n@@ -2899,12 +2917,17 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)\n {\n \tint i;\n \n-\tif (!vsi->tx_rings)\n-\t\treturn;\n+\tif (vsi->tx_rings) {\n+\t\tfor (i = 0; i < vsi->num_queue_pairs; i++)\n+\t\t\tif (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)\n+\t\t\t\ti40e_free_tx_resources(vsi->tx_rings[i]);\n+\t}\n \n-\tfor (i = 0; i < vsi->num_queue_pairs; i++)\n-\t\tif (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)\n-\t\t\ti40e_free_tx_resources(vsi->tx_rings[i]);\n+\tif (vsi->xdp_rings) {\n+\t\tfor (i = 0; i < vsi->num_queue_pairs; i++)\n+\t\t\tif (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)\n+\t\t\t\ti40e_free_tx_resources(vsi->xdp_rings[i]);\n+\t}\n }\n \n /**\n@@ -3170,6 +3193,12 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)\n \tfor (i = 0; (i < vsi->num_queue_pairs) && !err; i++)\n \t\terr = i40e_configure_tx_ring(vsi->tx_rings[i]);\n \n+\tif (!i40e_enabled_xdp_vsi(vsi))\n+\t\treturn err;\n+\n+\tfor (i = 0; (i < vsi->num_queue_pairs) && !err; i++)\n+\t\terr = i40e_configure_tx_ring(vsi->xdp_rings[i]);\n+\n \treturn err;\n }\n \n@@ -3318,7 +3347,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)\n \tstruct i40e_hw *hw = &pf->hw;\n \tu16 vector;\n \tint i, q;\n-\tu32 qp;\n+\tu32 qp, qp_idx = 0;\n \n \t/* The interrupt indexing is offset by 1 in the PFINT_ITRn\n \t * and PFINT_LNKLSTn registers, e.g.:\n@@ -3345,16 +3374,33 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)\n \t\twr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);\n \t\tfor (q = 0; q < q_vector->num_ringpairs; q++) {\n \t\t\tu32 val;\n+\t\t\tu32 nqp = qp;\n+\n+\t\t\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\t\t\tnqp = vsi->base_queue +\n+\t\t\t\t vsi->xdp_rings[qp_idx]->queue_index;\n+\t\t\t}\n \n \t\t\tval = I40E_QINT_RQCTL_CAUSE_ENA_MASK |\n-\t\t\t (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |\n-\t\t\t (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |\n-\t\t\t (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|\n+\t\t\t (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |\n+\t\t\t (vector << 
I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |\n+\t\t\t (nqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |\n \t\t\t (I40E_QUEUE_TYPE_TX\n \t\t\t\t << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);\n \n \t\t\twr32(hw, I40E_QINT_RQCTL(qp), val);\n \n+\t\t\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\t\t\tval = I40E_QINT_TQCTL_CAUSE_ENA_MASK |\n+\t\t\t\t (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |\n+\t\t\t\t (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |\n+\t\t\t\t (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |\n+\t\t\t\t (I40E_QUEUE_TYPE_TX\n+\t\t\t\t << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);\n+\n+\t\t\t\twr32(hw, I40E_QINT_TQCTL(nqp), val);\n+\t\t\t}\n+\n \t\t\tval = I40E_QINT_TQCTL_CAUSE_ENA_MASK |\n \t\t\t (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |\n \t\t\t (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |\n@@ -3369,6 +3415,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)\n \n \t\t\twr32(hw, I40E_QINT_TQCTL(qp), val);\n \t\t\tqp++;\n+\t\t\tqp_idx++;\n \t\t}\n \t}\n \n@@ -3422,7 +3469,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)\n \tstruct i40e_q_vector *q_vector = vsi->q_vectors[0];\n \tstruct i40e_pf *pf = vsi->back;\n \tstruct i40e_hw *hw = &pf->hw;\n-\tu32 val;\n+\tu32 val, nqp = 0;\n \n \t/* set the ITR configuration */\n \tq_vector->itr_countdown = ITR_COUNTDOWN_START;\n@@ -3438,13 +3485,28 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)\n \t/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */\n \twr32(hw, I40E_PFINT_LNKLST0, 0);\n \n+\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\tnqp = vsi->base_queue +\n+\t\t vsi->xdp_rings[0]->queue_index;\n+\t}\n+\n \t/* Associate the queue pair to the vector and enable the queue int */\n-\tval = I40E_QINT_RQCTL_CAUSE_ENA_MASK\t\t |\n-\t (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |\n+\tval = I40E_QINT_RQCTL_CAUSE_ENA_MASK\t\t\t|\n+\t (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)\t|\n+\t (nqp\t << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |\n \t (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);\n \n \twr32(hw, I40E_QINT_RQCTL(0), val);\n \n+\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\tval = I40E_QINT_TQCTL_CAUSE_ENA_MASK\t\t |\n+\t\t (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |\n+\t\t (I40E_QUEUE_TYPE_TX\n+\t\t << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);\n+\n+\t wr32(hw, I40E_QINT_TQCTL(nqp), val);\n+\t}\n+\n \tval = I40E_QINT_TQCTL_CAUSE_ENA_MASK\t\t |\n \t (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |\n \t (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);\n@@ -3611,6 +3673,10 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)\n \tfor (i = 0; i < vsi->num_queue_pairs; i++) {\n \t\twr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);\n \t\twr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);\n+\t\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\t\twr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx),\n+\t\t\t 0);\n+\t\t}\n \t}\n \n \tif (pf->flags & I40E_FLAG_MSIX_ENABLED) {\n@@ -3920,6 +3986,24 @@ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)\n }\n \n /**\n+ * i40e_map_vector_to_xdp_ring - Assigns the XDP Tx queue to the vector\n+ * @vsi: the VSI being configured\n+ * @v_idx: vector index\n+ * @xdp_idx: XDP Tx queue index\n+ **/\n+static void i40e_map_vector_to_xdp_ring(struct i40e_vsi *vsi, int v_idx,\n+\t\t\t\t\tint xdp_idx)\n+{\n+\tstruct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];\n+\tstruct i40e_ring *xdp_ring = vsi->xdp_rings[xdp_idx];\n+\n+\txdp_ring->q_vector = q_vector;\n+\txdp_ring->next = q_vector->xdp.ring;\n+\tq_vector->xdp.ring = 
xdp_ring;\n+\tq_vector->xdp.count++;\n+}\n+\n+/**\n * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors\n * @vsi: the VSI being configured\n *\n@@ -3952,11 +4036,17 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)\n \n \t\tq_vector->rx.count = 0;\n \t\tq_vector->tx.count = 0;\n+\t\tq_vector->xdp.count = 0;\n \t\tq_vector->rx.ring = NULL;\n \t\tq_vector->tx.ring = NULL;\n+\t\tq_vector->xdp.ring = NULL;\n \n \t\twhile (num_ringpairs--) {\n \t\t\ti40e_map_vector_to_qp(vsi, v_start, qp_idx);\n+\t\t\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\t\t\ti40e_map_vector_to_xdp_ring(vsi, v_start,\n+\t\t\t\t\t\t\t qp_idx);\n+\t\t\t}\n \t\t\tqp_idx++;\n \t\t\tqp_remaining--;\n \t\t}\n@@ -4050,56 +4140,82 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)\n }\n \n /**\n- * i40e_vsi_control_tx - Start or stop a VSI's rings\n+ * i40e_vsi_control_txq - Start or stop a VSI's queue\n * @vsi: the VSI being configured\n * @enable: start or stop the rings\n+ * @pf_q: the PF queue\n **/\n-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)\n+static int i40e_vsi_control_txq(struct i40e_vsi *vsi, bool enable, int pf_q)\n {\n \tstruct i40e_pf *pf = vsi->back;\n \tstruct i40e_hw *hw = &pf->hw;\n-\tint i, j, pf_q, ret = 0;\n+\tint j, ret = 0;\n \tu32 tx_reg;\n \n-\tpf_q = vsi->base_queue;\n-\tfor (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {\n+\t/* warn the TX unit of coming changes */\n+\ti40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);\n+\tif (!enable)\n+\t\tusleep_range(10, 20);\n \n-\t\t/* warn the TX unit of coming changes */\n-\t\ti40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);\n-\t\tif (!enable)\n-\t\t\tusleep_range(10, 20);\n+\tfor (j = 0; j < 50; j++) {\n+\t\ttx_reg = rd32(hw, I40E_QTX_ENA(pf_q));\n+\t\tif (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==\n+\t\t ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))\n+\t\t\tbreak;\n+\t\tusleep_range(1000, 2000);\n+\t}\n+\t/* Skip if the queue is already in the requested state */\n+\tif (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))\n+\t\treturn 0;\n \n-\t\tfor (j = 0; j < 50; j++) {\n-\t\t\ttx_reg = rd32(hw, I40E_QTX_ENA(pf_q));\n-\t\t\tif (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==\n-\t\t\t ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))\n-\t\t\t\tbreak;\n-\t\t\tusleep_range(1000, 2000);\n-\t\t}\n-\t\t/* Skip if the queue is already in the requested state */\n-\t\tif (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))\n-\t\t\tcontinue;\n+\t/* turn on/off the queue */\n+\tif (enable) {\n+\t\twr32(hw, I40E_QTX_HEAD(pf_q), 0);\n+\t\ttx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;\n+\t} else {\n+\t\ttx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;\n+\t}\n \n-\t\t/* turn on/off the queue */\n-\t\tif (enable) {\n-\t\t\twr32(hw, I40E_QTX_HEAD(pf_q), 0);\n-\t\t\ttx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;\n-\t\t} else {\n-\t\t\ttx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;\n-\t\t}\n+\twr32(hw, I40E_QTX_ENA(pf_q), tx_reg);\n+\t/* No waiting for the Tx queue to disable */\n+\tif (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))\n+\t\treturn 0;\n \n-\t\twr32(hw, I40E_QTX_ENA(pf_q), tx_reg);\n-\t\t/* No waiting for the Tx queue to disable */\n-\t\tif (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))\n-\t\t\tcontinue;\n+\t/* wait for the change to finish */\n+\tret = i40e_pf_txq_wait(pf, pf_q, enable);\n+\tif (ret) {\n+\t\tdev_info(&pf->pdev->dev,\n+\t\t\t \"VSI seid %d Tx ring %d %sable timeout\\n\",\n+\t\t\t vsi->seid, pf_q, (enable ? 
\"en\" : \"dis\"));\n+\t\treturn ret;\n+\t}\n+\treturn 0;\n+}\n \n-\t\t/* wait for the change to finish */\n-\t\tret = i40e_pf_txq_wait(pf, pf_q, enable);\n-\t\tif (ret) {\n-\t\t\tdev_info(&pf->pdev->dev,\n-\t\t\t\t \"VSI seid %d Tx ring %d %sable timeout\\n\",\n-\t\t\t\t vsi->seid, pf_q, (enable ? \"en\" : \"dis\"));\n+/**\n+ * i40e_vsi_control_tx - Start or stop a VSI's rings\n+ * @vsi: the VSI being configured\n+ * @enable: start or stop the rings\n+ **/\n+static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)\n+{\n+\tstruct i40e_pf *pf = vsi->back;\n+\tstruct i40e_hw *hw = &pf->hw;\n+\tint i, pf_q, ret = 0;\n+\n+\tpf_q = vsi->base_queue;\n+\tfor (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {\n+\t\tret = i40e_vsi_control_txq(vsi, enable, pf_q);\n+\t\tif (ret)\n \t\t\tbreak;\n+\t}\n+\n+\tif (!ret && i40e_enabled_xdp_vsi(vsi)) {\n+\t\tfor (i = 0; i < vsi->num_queue_pairs; i++) {\n+\t\t\tpf_q = vsi->base_queue + vsi->xdp_rings[i]->queue_index;\n+\t\t\tret = i40e_vsi_control_txq(vsi, enable, pf_q);\n+\t\t\tif (ret)\n+\t\t\t\tbreak;\n \t\t}\n \t}\n \n@@ -4360,6 +4476,9 @@ static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)\n \ti40e_for_each_ring(ring, q_vector->rx)\n \t\tring->q_vector = NULL;\n \n+\ti40e_for_each_ring(ring, q_vector->xdp)\n+\t\tring->q_vector = NULL;\n+\n \t/* only VSI w/ an associated netdev is set up w/ NAPI */\n \tif (vsi->netdev)\n \t\tnetif_napi_del(&q_vector->napi);\n@@ -4583,6 +4702,21 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)\n \t\t}\n \t}\n \n+\tif (!i40e_enabled_xdp_vsi(vsi))\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < vsi->num_queue_pairs; i++) {\n+\t\tpf_q = vsi->base_queue + vsi->xdp_rings[i]->queue_index;\n+\t\t/* Check and wait for the disable status of the queue */\n+\t\tret = i40e_pf_txq_wait(pf, pf_q, false);\n+\t\tif (ret) {\n+\t\t\tdev_info(&pf->pdev->dev,\n+\t\t\t\t \"VSI seid %d XDP Tx ring %d disable timeout\\n\",\n+\t\t\t\t vsi->seid, pf_q);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n \treturn 0;\n }\n \n@@ -5540,6 +5674,8 @@ void i40e_down(struct i40e_vsi *vsi)\n \n \tfor (i = 0; i < vsi->num_queue_pairs; i++) {\n \t\ti40e_clean_tx_ring(vsi->tx_rings[i]);\n+\t\tif (i40e_enabled_xdp_vsi(vsi))\n+\t\t\ti40e_clean_tx_ring(vsi->xdp_rings[i]);\n \t\ti40e_clean_rx_ring(vsi->rx_rings[i]);\n \t}\n \n@@ -7542,6 +7678,16 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)\n \t\treturn -ENOMEM;\n \tvsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];\n \n+\tif (i40e_enabled_xdp_vsi(vsi)) {\n+\t\tsize = sizeof(struct i40e_ring *) *\n+\t\t i40e_alloc_queue_pairs_xdp_vsi(vsi);\n+\t\tvsi->xdp_rings = kzalloc(size, GFP_KERNEL);\n+\t\tif (!vsi->xdp_rings) {\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto err_xdp_rings;\n+\t\t}\n+\t}\n+\n \tif (alloc_qvectors) {\n \t\t/* allocate memory for q_vector pointers */\n \t\tsize = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;\n@@ -7554,6 +7700,8 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)\n \treturn ret;\n \n err_vectors:\n+\tkfree(vsi->xdp_rings);\n+err_xdp_rings:\n \tkfree(vsi->tx_rings);\n \treturn ret;\n }\n@@ -7660,6 +7808,8 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)\n \tkfree(vsi->tx_rings);\n \tvsi->tx_rings = NULL;\n \tvsi->rx_rings = NULL;\n+\tkfree(vsi->xdp_rings);\n+\tvsi->xdp_rings = NULL;\n }\n \n /**\n@@ -7745,6 +7895,13 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)\n \t\t\tvsi->rx_rings[i] = NULL;\n \t\t}\n \t}\n+\n+\tif (vsi->xdp_rings && vsi->xdp_rings[0]) 
{\n+\t\tfor (i = 0; i < vsi->alloc_queue_pairs; i++) {\n+\t\t\tkfree_rcu(vsi->xdp_rings[i], rcu);\n+\t\t\tvsi->xdp_rings[i] = NULL;\n+\t\t}\n+\t}\n }\n \n /**\n@@ -7792,6 +7949,31 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)\n \t\tvsi->rx_rings[i] = rx_ring;\n \t}\n \n+\tif (!i40e_enabled_xdp_vsi(vsi))\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < vsi->alloc_queue_pairs; i++) {\n+\t\ttx_ring = kzalloc(sizeof(*tx_ring), GFP_KERNEL);\n+\t\tif (!tx_ring)\n+\t\t\tgoto err_out;\n+\n+\t\ttx_ring->queue_index = vsi->alloc_queue_pairs + i;\n+\t\ttx_ring->reg_idx = vsi->base_queue + vsi->alloc_queue_pairs + i;\n+\t\ttx_ring->ring_active = false;\n+\t\ttx_ring->vsi = vsi;\n+\t\ttx_ring->netdev = NULL;\n+\t\ttx_ring->dev = &pf->pdev->dev;\n+\t\ttx_ring->count = vsi->num_desc;\n+\t\ttx_ring->size = 0;\n+\t\ttx_ring->dcb_tc = 0;\n+\t\tif (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)\n+\t\t\ttx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;\n+\t\ttx_ring->tx_itr_setting = pf->tx_itr_default;\n+\t\ttx_ring->xdp_sibling = vsi->rx_rings[i];\n+\t\tvsi->xdp_rings[i] = tx_ring;\n+\t\tvsi->rx_rings[i]->xdp_sibling = tx_ring;\n+\t}\n+\n \treturn 0;\n \n err_out:\n@@ -10035,6 +10217,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)\n \tstruct i40e_pf *pf;\n \tu8 enabled_tc;\n \tint ret;\n+\tu16 alloc_queue_pairs;\n \n \tif (!vsi)\n \t\treturn NULL;\n@@ -10050,11 +10233,13 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)\n \tif (ret)\n \t\tgoto err_vsi;\n \n-\tret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);\n+\talloc_queue_pairs = vsi->alloc_queue_pairs +\n+\t\t\t i40e_alloc_queue_pairs_xdp_vsi(vsi);\n+\tret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);\n \tif (ret < 0) {\n \t\tdev_info(&pf->pdev->dev,\n \t\t\t \"failed to get tracking for %d queues for VSI %d err %d\\n\",\n-\t\t\t vsi->alloc_queue_pairs, vsi->seid, ret);\n+\t\t\t alloc_queue_pairs, vsi->seid, ret);\n \t\tgoto err_vsi;\n \t}\n \tvsi->base_queue = ret;\n@@ -10112,6 +10297,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,\n \tstruct i40e_veb *veb = NULL;\n \tint ret, i;\n \tint v_idx;\n+\tu16 alloc_queue_pairs;\n \n \t/* The requested uplink_seid must be either\n \t * - the PF's port seid\n@@ -10196,13 +10382,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,\n \t\tpf->lan_vsi = v_idx;\n \telse if (type == I40E_VSI_SRIOV)\n \t\tvsi->vf_id = param1;\n+\n+\talloc_queue_pairs = vsi->alloc_queue_pairs +\n+\t\t\t i40e_alloc_queue_pairs_xdp_vsi(vsi);\n \t/* assign it some queues */\n-\tret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,\n-\t\t\t\tvsi->idx);\n+\tret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs,\tvsi->idx);\n \tif (ret < 0) {\n \t\tdev_info(&pf->pdev->dev,\n \t\t\t \"failed to get tracking for %d queues for VSI %d err=%d\\n\",\n-\t\t\t vsi->alloc_queue_pairs, vsi->seid, ret);\n+\t\t\t alloc_queue_pairs, vsi->seid, ret);\n \t\tgoto err_vsi;\n \t}\n \tvsi->base_queue = ret;\ndiff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\nindex ad57c406c5f7..14d84509a3cc 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n@@ -525,6 +525,8 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,\n \tif (tx_buffer->skb) {\n \t\tif (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)\n \t\t\tkfree(tx_buffer->raw_buf);\n+\t\telse if (tx_buffer->tx_flags & I40E_TX_FLAGS_XDP)\n+\t\t\tput_page(tx_buffer->page);\n 
\t\telse\n \t\t\tdev_kfree_skb_any(tx_buffer->skb);\n \t\tif (dma_unmap_len(tx_buffer, len))\n@@ -767,6 +769,98 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,\n \treturn !!budget;\n }\n \n+static bool i40e_clean_xdp_irq(struct i40e_vsi *vsi,\n+\t\t\t struct i40e_ring *tx_ring)\n+{\n+\tu16 i = tx_ring->next_to_clean;\n+\tstruct i40e_tx_buffer *tx_buf;\n+\tstruct i40e_tx_desc *tx_head;\n+\tstruct i40e_tx_desc *tx_desc;\n+\tunsigned int total_bytes = 0, total_packets = 0;\n+\tunsigned int budget = vsi->work_limit;\n+\n+\ttx_buf = &tx_ring->tx_bi[i];\n+\ttx_desc = I40E_TX_DESC(tx_ring, i);\n+\ti -= tx_ring->count;\n+\n+\ttx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));\n+\n+\tdo {\n+\t\tstruct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;\n+\n+\t\t/* if next_to_watch is not set then there is no work pending */\n+\t\tif (!eop_desc)\n+\t\t\tbreak;\n+\n+\t\t/* prevent any other reads prior to eop_desc */\n+\t\tread_barrier_depends();\n+\n+\t\t/* we have caught up to head, no work left to do */\n+\t\tif (tx_head == tx_desc)\n+\t\t\tbreak;\n+\n+\t\t/* clear next_to_watch to prevent false hangs */\n+\t\ttx_buf->next_to_watch = NULL;\n+\n+\t\t/* update the statistics for this packet */\n+\t\ttotal_bytes += tx_buf->bytecount;\n+\t\ttotal_packets += tx_buf->gso_segs;\n+\n+\t\tput_page(tx_buf->page);\n+\n+\t\t/* unmap skb header data */\n+\t\tdma_unmap_single(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buf, dma),\n+\t\t\t\t dma_unmap_len(tx_buf, len),\n+\t\t\t\t DMA_TO_DEVICE);\n+\n+\t\t/* clear tx_buffer data */\n+\t\ttx_buf->skb = NULL;\n+\t\tdma_unmap_len_set(tx_buf, len, 0);\n+\n+\t\t/* move us one more past the eop_desc for start of next pkt */\n+\t\ttx_buf++;\n+\t\ttx_desc++;\n+\t\ti++;\n+\t\tif (unlikely(!i)) {\n+\t\t\ti -= tx_ring->count;\n+\t\t\ttx_buf = tx_ring->tx_bi;\n+\t\t\ttx_desc = I40E_TX_DESC(tx_ring, 0);\n+\t\t}\n+\n+\t\tprefetch(tx_desc);\n+\n+\t\t/* update budget accounting */\n+\t\tbudget--;\n+\t} while (likely(budget));\n+\n+\ti += tx_ring->count;\n+\ttx_ring->next_to_clean = i;\n+\tu64_stats_update_begin(&tx_ring->syncp);\n+\ttx_ring->stats.bytes += total_bytes;\n+\ttx_ring->stats.packets += total_packets;\n+\tu64_stats_update_end(&tx_ring->syncp);\n+\ttx_ring->q_vector->tx.total_bytes += total_bytes;\n+\ttx_ring->q_vector->tx.total_packets += total_packets;\n+\n+\tif (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {\n+\t\t/* check to see if there are < 4 descriptors\n+\t\t * waiting to be written back, then kick the hardware to force\n+\t\t * them to be written back in case we stay in NAPI.\n+\t\t * In this mode on X722 we do not enable Interrupt.\n+\t\t */\n+\t\tunsigned int j = i40e_get_tx_pending(tx_ring, false);\n+\n+\t\tif (budget &&\n+\t\t ((j / WB_STRIDE) == 0) && (j > 0) &&\n+\t\t !test_bit(__I40E_DOWN, &vsi->state) &&\n+\t\t (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))\n+\t\t\ttx_ring->arm_wb = true;\n+\t}\n+\n+\treturn !!budget;\n+}\n+\n /**\n * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled\n * @vsi: the VSI we care about\n@@ -1460,29 +1554,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)\n }\n \n /**\n- * i40e_reuse_rx_page - page flip buffer and store it back on the ring\n- * @rx_ring: rx descriptor ring to store buffers on\n- * @old_buff: donor buffer to have page reused\n- *\n- * Synchronizes page for reuse by the adapter\n- **/\n-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,\n-\t\t\t struct i40e_rx_buffer *old_buff)\n-{\n-\tstruct i40e_rx_buffer *new_buff;\n-\tu16 nta = 
rx_ring->next_to_alloc;\n-\n-\tnew_buff = &rx_ring->rx_bi[nta];\n-\n-\t/* update, and store next to alloc */\n-\tnta++;\n-\trx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;\n-\n-\t/* transfer page from old buffer to new buffer */\n-\t*new_buff = *old_buff;\n-}\n-\n-/**\n * i40e_page_is_reusable - check if any reuse is possible\n * @page: page struct to check\n *\n@@ -1627,6 +1698,103 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n }\n \n /**\n+ * i40e_xdp_xmit_tail_bump - updates the tail and sets the RS bit\n+ * @xdp_ring: XDP Tx ring\n+ **/\n+static\n+void i40e_xdp_xmit_tail_bump(struct i40e_ring *xdp_ring)\n+{\n+\tstruct i40e_tx_desc *tx_desc;\n+\n+\t/* Set RS and bump tail */\n+\ttx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->curr_in_use);\n+\ttx_desc->cmd_type_offset_bsz |=\n+\t\tcpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);\n+\t/* Force memory writes to complete before letting h/w know\n+\t * there are new descriptors to fetch. (Only applicable for\n+\t * weak-ordered memory model archs, such as IA-64).\n+\t */\n+\twmb();\n+\twritel(xdp_ring->curr_in_use, xdp_ring->tail);\n+\n+\txdp_ring->xdp_needs_tail_bump = false;\n+}\n+\n+/**\n+ * i40e_xdp_xmit - transmit a frame on the XDP Tx queue\n+ * @xdp_ring: XDP Tx ring\n+ * @page: current page containing the frame\n+ * @page_offset: offset where the frame resides\n+ * @dma: Bus address of the frame\n+ * @size: size of the frame\n+ *\n+ * Returns true successfully sent.\n+ **/\n+static bool i40e_xdp_xmit(void *data, size_t size, struct page *page,\n+\t\t\t struct i40e_ring *xdp_ring)\n+{\n+\tstruct i40e_tx_buffer *tx_bi;\n+\tstruct i40e_tx_desc *tx_desc;\n+\tu16 i = xdp_ring->next_to_use;\n+\tdma_addr_t dma;\n+\n+\tif (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1)) {\n+\t\tif (xdp_ring->xdp_needs_tail_bump)\n+\t\t\ti40e_xdp_xmit_tail_bump(xdp_ring);\n+\t\txdp_ring->tx_stats.tx_busy++;\n+\t\treturn false;\n+\t}\n+\n+\ttx_bi = &xdp_ring->tx_bi[i];\n+\ttx_bi->bytecount = size;\n+\ttx_bi->gso_segs = 1;\n+\ttx_bi->tx_flags = I40E_TX_FLAGS_XDP;\n+\ttx_bi->page = page;\n+\n+\tdma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);\n+\tif (dma_mapping_error(xdp_ring->dev, dma))\n+\t\treturn false;\n+\n+\t/* record length, and DMA address */\n+\tdma_unmap_len_set(tx_bi, len, size);\n+\tdma_unmap_addr_set(tx_bi, dma, dma);\n+\n+\ttx_desc = I40E_TX_DESC(xdp_ring, i);\n+\ttx_desc->buffer_addr = cpu_to_le64(dma);\n+\ttx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC\n+\t\t\t\t\t\t | I40E_TX_DESC_CMD_EOP,\n+\t\t\t\t\t\t 0, size, 0);\n+\ttx_bi->next_to_watch = tx_desc;\n+\txdp_ring->curr_in_use = i++;\n+\txdp_ring->next_to_use = (i < xdp_ring->count) ? i : 0;\n+\txdp_ring->xdp_needs_tail_bump = true;\n+\treturn true;\n+}\n+\n+/**\n+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring\n+ * @rx_ring: rx descriptor ring to store buffers on\n+ * @old_buff: donor buffer to have page reused\n+ *\n+ * Synchronizes page for reuse by the adapter\n+ **/\n+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,\n+\t\t\t struct i40e_rx_buffer *old_buff)\n+{\n+\tstruct i40e_rx_buffer *new_buff;\n+\tu16 nta = rx_ring->next_to_alloc;\n+\n+\tnew_buff = &rx_ring->rx_bi[nta];\n+\n+\t/* update, and store next to alloc */\n+\tnta++;\n+\trx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0;\n+\n+\t/* transfer page from old buffer to new buffer */\n+\t*new_buff = *old_buff;\n+}\n+\n+/**\n * i40e_run_xdp - Runs an XDP program for an Rx ring\n * @rx_ring: Rx ring used for XDP\n * @rx_buffer: current Rx buffer\n@@ -1643,8 +1811,14 @@ static bool i40e_run_xdp(struct i40e_ring *rx_ring,\n \t\t\t unsigned int size,\n \t\t\t struct bpf_prog *xdp_prog)\n {\n+#if (PAGE_SIZE < 8192)\n+\tunsigned int truesize = I40E_RXBUFFER_2048;\n+#else\n+\tunsigned int truesize = ALIGN(size, L1_CACHE_BYTES);\n+#endif\n \tstruct xdp_buff xdp;\n \tu32 xdp_action;\n+\tbool tx_ok;\n \n \tif (unlikely(!i40e_test_staterr(rx_desc,\n \t\t\t\t\tBIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {\n@@ -1661,10 +1835,21 @@ static bool i40e_run_xdp(struct i40e_ring *rx_ring,\n \tswitch (xdp_action) {\n \tcase XDP_PASS:\n \t\treturn false;\n-\tdefault:\n-\t\tbpf_warn_invalid_xdp_action(xdp_action);\n-\tcase XDP_ABORTED:\n \tcase XDP_TX:\n+\t\ttx_ok = i40e_xdp_xmit(xdp.data, size, rx_buffer->page,\n+\t\t\t\t rx_ring->xdp_sibling);\n+\t\tif (likely(tx_ok)) {\n+\t\t\tif (i40e_can_reuse_rx_page(rx_buffer, rx_buffer->page,\n+\t\t\t\t\t\t truesize)) {\n+\t\t\t\ti40e_reuse_rx_page(rx_ring, rx_buffer);\n+\t\t\t\trx_ring->rx_stats.page_reuse_count++;\n+\t\t\t} else {\n+\t\t\t\tdma_unmap_page(rx_ring->dev, rx_buffer->dma,\n+\t\t\t\t\t PAGE_SIZE, DMA_FROM_DEVICE);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n+\tcase XDP_ABORTED:\n \tcase XDP_DROP:\n do_drop:\n \t\tif (likely(i40e_page_is_reusable(rx_buffer->page))) {\n@@ -1672,11 +1857,13 @@ static bool i40e_run_xdp(struct i40e_ring *rx_ring,\n \t\t\trx_ring->rx_stats.page_reuse_count++;\n \t\t\tbreak;\n \t\t}\n-\n-\t\t/* we are not reusing the buffer so unmap it */\n \t\tdma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,\n \t\t\t DMA_FROM_DEVICE);\n \t\t__free_pages(rx_buffer->page, 0);\n+\t\tbreak;\n+\tdefault:\n+\t\tbpf_warn_invalid_xdp_action(xdp_action);\n+\t\tgoto do_drop;\n \t}\n \n \t/* clear contents of buffer_info */\n@@ -2104,6 +2291,15 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)\n \t\tring->arm_wb = false;\n \t}\n \n+\ti40e_for_each_ring(ring, q_vector->xdp) {\n+\t\tif (!i40e_clean_xdp_irq(vsi, ring)) {\n+\t\t\tclean_complete = false;\n+\t\t\tcontinue;\n+\t\t}\n+\t\tarm_wb |= ring->arm_wb;\n+\t\tring->arm_wb = false;\n+\t}\n+\n \t/* Handle case where we are called by netpoll with a budget of 0 */\n \tif (budget <= 0)\n \t\tgoto tx_only;\n@@ -2116,6 +2312,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)\n \ti40e_for_each_ring(ring, q_vector->rx) {\n \t\tint cleaned = i40e_clean_rx_irq(ring, budget_per_ring);\n \n+\t\tif (ring->xdp_sibling && ring->xdp_sibling->xdp_needs_tail_bump)\n+\t\t\ti40e_xdp_xmit_tail_bump(ring->xdp_sibling);\n+\n \t\twork_done += cleaned;\n \t\t/* if we clean as many as budgeted, we must not be done */\n \t\tif (cleaned >= budget_per_ring)\ndiff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h\nindex 78d0aa0468f1..3250be70271d 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h\n@@ -233,6 +233,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)\n #define I40E_TX_FLAGS_TSYN\t\tBIT(8)\n #define I40E_TX_FLAGS_FD_SB\t\tBIT(9)\n #define I40E_TX_FLAGS_UDP_TUNNEL\tBIT(10)\n+#define I40E_TX_FLAGS_XDP\t\tBIT(11)\n #define I40E_TX_FLAGS_VLAN_MASK\t\t0xffff0000\n #define I40E_TX_FLAGS_VLAN_PRIO_MASK\t0xe0000000\n #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT\t29\n@@ -243,6 +244,7 @@ struct i40e_tx_buffer {\n \tunion 
{\n \t\tstruct sk_buff *skb;\n \t\tvoid *raw_buf;\n+\t\tstruct page *page;\n \t};\n \tunsigned int bytecount;\n \tunsigned short gso_segs;\n@@ -363,6 +365,9 @@ struct i40e_ring {\n \t\t\t\t\t */\n \n \tstruct bpf_prog __rcu *xdp_prog;\n+\tstruct i40e_ring *xdp_sibling; /* rx to xdp, and xdp to rx */\n+\tbool xdp_needs_tail_bump;\n+\tu16 curr_in_use;\n } ____cacheline_internodealigned_in_smp;\n \n enum i40e_latency_range {\n", "prefixes": [ "v4", "3/4" ] }
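The response is hyperlinked: the comments, checks, and mbox fields are plain URLs to related resources, so a client can walk from a patch to everything attached to it. A small sketch of that, again with requests (the output filename is an arbitrary choice for this example); the saved mbox file can then be applied locally with git am:

import requests

patch = requests.get("http://patchwork.ozlabs.org/api/patches/706704/").json()

# Follow the embedded links to the patch's sub-resources.
comments = requests.get(patch["comments"]).json()
checks = requests.get(patch["checks"]).json()

# The mbox URL serves the raw patch email, which `git am` understands.
with open("i40e-xdp-tx.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)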