Patch Detail
get: Show a patch.
patch: Update a patch (partial update).
put: Update a patch (full update).
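For example, the endpoints above can be driven from a short script. The sketch below uses the third-party Python requests library and assumes a Patchwork API token (write operations such as patch/put require appropriate rights on the project); the patch ID and field names are taken from the response shown below, and the token value is a placeholder.

import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 974012                 # the patch shown on this page
TOKEN = "0123456789abcdef"        # hypothetical token; replace with your own

# get: show a patch (read access needs no authentication)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "-", patch["state"])   # e.g. "[3/5] ixgbe: ... - superseded"

# patch: update selected fields, e.g. archive the patch
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"archived": True},
)
resp.raise_for_status()

A plain GET of the endpoint, shown next, returns the full patch record.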
GET /api/patches/974012/?format=api
{ "id": 974012, "url": "http://patchwork.ozlabs.org/api/patches/974012/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180924163557.1187-4-bjorn.topel@gmail.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20180924163557.1187-4-bjorn.topel@gmail.com>", "list_archive_url": null, "date": "2018-09-24T16:35:55", "name": "[3/5] ixgbe: add AF_XDP zero-copy Rx support", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "4eb072638c7969e562f8f4830241cacb7b511db4", "submitter": { "id": 70569, "url": "http://patchwork.ozlabs.org/api/people/70569/?format=api", "name": "Björn Töpel", "email": "bjorn.topel@gmail.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180924163557.1187-4-bjorn.topel@gmail.com/mbox/", "series": [ { "id": 67233, "url": "http://patchwork.ozlabs.org/api/series/67233/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=67233", "date": "2018-09-24T16:35:52", "name": "Introducing ixgbe AF_XDP ZC support", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/67233/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/974012/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/974012/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.138; helo=whitealder.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=gmail.com" ], "Received": [ "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 42Jqd72XYmz9s9h\n\tfor <incoming@patchwork.ozlabs.org>;\n\tTue, 25 Sep 2018 02:38:03 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 9BFE685AE8;\n\tMon, 24 Sep 2018 16:38:01 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id TdNLMSybzW8D; Mon, 24 Sep 2018 16:37:56 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id D114F85AB4;\n\tMon, 24 Sep 2018 16:37:56 +0000 (UTC)", "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\tby ash.osuosl.org (Postfix) with ESMTP id 41E141BFB70\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 24 Sep 2018 16:37:54 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 
3F1DB84F6F\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 24 Sep 2018 16:37:54 +0000 (UTC)", "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id k28ciEsYEYct for <intel-wired-lan@lists.osuosl.org>;\n\tMon, 24 Sep 2018 16:37:52 +0000 (UTC)", "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby fraxinus.osuosl.org (Postfix) with ESMTPS id C1F748502B\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 24 Sep 2018 16:37:52 +0000 (UTC)", "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t24 Sep 2018 09:37:52 -0700", "from dandretz-mobl2.ger.corp.intel.com (HELO\n\tbtopel-mobl1.isw.intel.com) ([10.252.37.237])\n\tby FMSMGA003.fm.intel.com with ESMTP; 24 Sep 2018 09:36:42 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "from auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.54,298,1534834800\"; d=\"scan'208\";a=\"82877943\"", "From": "=?utf-8?b?QmrDtnJuIFTDtnBlbA==?= <bjorn.topel@gmail.com>", "To": "jeffrey.t.kirsher@intel.com,\n\tintel-wired-lan@lists.osuosl.org", "Date": "Mon, 24 Sep 2018 18:35:55 +0200", "Message-Id": "<20180924163557.1187-4-bjorn.topel@gmail.com>", "X-Mailer": "git-send-email 2.17.1", "In-Reply-To": "<20180924163557.1187-1-bjorn.topel@gmail.com>", "References": "<20180924163557.1187-1-bjorn.topel@gmail.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH 3/5] ixgbe: add AF_XDP zero-copy Rx support", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.24", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Cc": "daniel@iogearbox.net, netdev@vger.kernel.org, ast@kernel.org,\n\ttuc@vmware.com, u9012063@gmail.com, brouer@redhat.com, =?utf-8?q?Bj?=\n\t=?utf-8?b?w7ZybiBUw7ZwZWw=?= <bjorn.topel@intel.com>,\n\tmagnus.karlsson@gmail.com, magnus.karlsson@intel.com", "Content-Type": "text/plain; charset=\"utf-8\"", "Content-Transfer-Encoding": "base64", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Björn Töpel <bjorn.topel@intel.com>\n\nThis patch adds zero-copy Rx support for AF_XDP sockets. 
Instead of\nallocating buffers of type MEM_TYPE_PAGE_SHARED, the Rx frames are\nallocated as MEM_TYPE_ZERO_COPY when AF_XDP is enabled for a certain\nqueue.\n\nAll AF_XDP specific functions are added to a new file, ixgbe_xsk.c.\n\nNote that when AF_XDP zero-copy is enabled, the XDP action XDP_PASS\nwill allocate a new buffer and copy the zero-copy frame prior passing\nit to the kernel stack.\n\nSigned-off-by: Björn Töpel <bjorn.topel@intel.com>\n---\n drivers/net/ethernet/intel/ixgbe/Makefile | 3 +-\n drivers/net/ethernet/intel/ixgbe/ixgbe.h | 26 +-\n drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 78 ++-\n .../ethernet/intel/ixgbe/ixgbe_txrx_common.h | 15 +\n drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 639 ++++++++++++++++++\n 5 files changed, 741 insertions(+), 20 deletions(-)\n create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c", "diff": "diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile\nindex 5414685189ce..ca6b0c458e4a 100644\n--- a/drivers/net/ethernet/intel/ixgbe/Makefile\n+++ b/drivers/net/ethernet/intel/ixgbe/Makefile\n@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o\n \n ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \\\n ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \\\n- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o\n+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \\\n+ ixgbe_xsk.o\n \n ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \\\n ixgbe_dcb_82599.o ixgbe_dcb_nl.o\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h\nindex 265db172042a..421fdac3a76d 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h\n@@ -228,13 +228,17 @@ struct ixgbe_tx_buffer {\n struct ixgbe_rx_buffer {\n \tstruct sk_buff *skb;\n \tdma_addr_t dma;\n-\tstruct page *page;\n-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)\n-\t__u32 page_offset;\n-#else\n-\t__u16 page_offset;\n-#endif\n-\t__u16 pagecnt_bias;\n+\tunion {\n+\t\tstruct {\n+\t\t\tstruct page *page;\n+\t\t\t__u32 page_offset;\n+\t\t\t__u16 pagecnt_bias;\n+\t\t};\n+\t\tstruct {\n+\t\t\tvoid *addr;\n+\t\t\tu64 handle;\n+\t\t};\n+\t};\n };\n \n struct ixgbe_queue_stats {\n@@ -348,6 +352,9 @@ struct ixgbe_ring {\n \t\tstruct ixgbe_rx_queue_stats rx_stats;\n \t};\n \tstruct xdp_rxq_info xdp_rxq;\n+\tstruct xdp_umem *xsk_umem;\n+\tstruct zero_copy_allocator zca; /* ZC allocator anchor */\n+\tu16 rx_buf_len;\n } ____cacheline_internodealigned_in_smp;\n \n enum ixgbe_ring_f_enum {\n@@ -765,6 +772,11 @@ struct ixgbe_adapter {\n #ifdef CONFIG_XFRM_OFFLOAD\n \tstruct ixgbe_ipsec *ipsec;\n #endif /* CONFIG_XFRM_OFFLOAD */\n+\n+\t/* AF_XDP zero-copy */\n+\tstruct xdp_umem **xsk_umems;\n+\tu16 num_xsk_umems_used;\n+\tu16 num_xsk_umems;\n };\n \n static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\nindex 3e2e6fb2215a..4e6726c623a8 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n@@ -34,6 +34,7 @@\n #include <net/tc_act/tc_mirred.h>\n #include <net/vxlan.h>\n #include <net/mpls.h>\n+#include <net/xdp_sock.h>\n \n #include \"ixgbe.h\"\n #include \"ixgbe_common.h\"\n@@ -3176,7 +3177,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)\n \t\tper_ring_budget = budget;\n \n \tixgbe_for_each_ring(ring, q_vector->rx) {\n-\t\tint cleaned = 
ixgbe_clean_rx_irq(q_vector, ring,\n+\t\tint cleaned = ring->xsk_umem ?\n+\t\t\t ixgbe_clean_rx_irq_zc(q_vector, ring,\n+\t\t\t\t\t\t per_ring_budget) :\n+\t\t\t ixgbe_clean_rx_irq(q_vector, ring,\n \t\t\t\t\t\t per_ring_budget);\n \n \t\twork_done += cleaned;\n@@ -3706,10 +3710,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,\n \tsrrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;\n \n \t/* configure the packet buffer length */\n-\tif (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))\n+\tif (rx_ring->xsk_umem) {\n+\t\tu32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -\n+\t\t\t\t XDP_PACKET_HEADROOM;\n+\n+\t\t/* If the MAC support setting RXDCTL.RLPML, the\n+\t\t * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and\n+\t\t * RXDCTL.RLPML is set to the actual UMEM buffer\n+\t\t * size. If not, then we are stuck with a 1k buffer\n+\t\t * size resolution. In this case frames larger than\n+\t\t * the UMEM buffer size viewed in a 1k resolution will\n+\t\t * be dropped.\n+\t\t */\n+\t\tif (hw->mac.type != ixgbe_mac_82599EB)\n+\t\t\tsrrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;\n+\t\telse\n+\t\t\tsrrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;\n+\t} else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {\n \t\tsrrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;\n-\telse\n+\t} else {\n \t\tsrrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;\n+\t}\n \n \t/* configure descriptor type */\n \tsrrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;\n@@ -4032,6 +4053,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,\n \tu32 rxdctl;\n \tu8 reg_idx = ring->reg_idx;\n \n+\txdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);\n+\tring->xsk_umem = ixgbe_xsk_umem(adapter, ring);\n+\tif (ring->xsk_umem) {\n+\t\tring->zca.free = ixgbe_zca_free;\n+\t\tWARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,\n+\t\t\t\t\t\t MEM_TYPE_ZERO_COPY,\n+\t\t\t\t\t\t &ring->zca));\n+\n+\t} else {\n+\t\tWARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,\n+\t\t\t\t\t\t MEM_TYPE_PAGE_SHARED, NULL));\n+\t}\n+\n \t/* disable queue to avoid use of these values while updating state */\n \trxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));\n \trxdctl &= ~IXGBE_RXDCTL_ENABLE;\n@@ -4081,6 +4115,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,\n #endif\n \t}\n \n+\tif (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {\n+\t\tu32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -\n+\t\t\t\t XDP_PACKET_HEADROOM;\n+\n+\t\trxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |\n+\t\t\t IXGBE_RXDCTL_RLPML_EN);\n+\t\trxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;\n+\n+\t\tring->rx_buf_len = xsk_buf_len;\n+\t}\n+\n \t/* initialize rx_buffer_info */\n \tmemset(ring->rx_buffer_info, 0,\n \t sizeof(struct ixgbe_rx_buffer) * ring->count);\n@@ -4094,7 +4139,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,\n \tIXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);\n \n \tixgbe_rx_desc_queue_enable(adapter, ring);\n-\tixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));\n+\tif (ring->xsk_umem)\n+\t\tixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));\n+\telse\n+\t\tixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));\n }\n \n static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)\n@@ -5202,6 +5250,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)\n \tu16 i = rx_ring->next_to_clean;\n \tstruct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];\n \n+\tif (rx_ring->xsk_umem) {\n+\t\tixgbe_xsk_clean_rx_ring(rx_ring);\n+\t\tgoto 
skip_free;\n+\t}\n+\n \t/* Free all the Rx ring sk_buffs */\n \twhile (i != rx_ring->next_to_alloc) {\n \t\tif (rx_buffer->skb) {\n@@ -5240,6 +5293,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)\n \t\t}\n \t}\n \n+skip_free:\n \trx_ring->next_to_alloc = 0;\n \trx_ring->next_to_clean = 0;\n \trx_ring->next_to_use = 0;\n@@ -6435,7 +6489,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,\n \tstruct device *dev = rx_ring->dev;\n \tint orig_node = dev_to_node(dev);\n \tint ring_node = -1;\n-\tint size, err;\n+\tint size;\n \n \tsize = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;\n \n@@ -6472,13 +6526,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,\n \t\t\t rx_ring->queue_index) < 0)\n \t\tgoto err;\n \n-\terr = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,\n-\t\t\t\t\t MEM_TYPE_PAGE_SHARED, NULL);\n-\tif (err) {\n-\t\txdp_rxq_info_unreg(&rx_ring->xdp_rxq);\n-\t\tgoto err;\n-\t}\n-\n \trx_ring->xdp_prog = adapter->xdp_prog;\n \n \treturn 0;\n@@ -10216,6 +10263,13 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)\n \t\txdp->prog_id = adapter->xdp_prog ?\n \t\t\tadapter->xdp_prog->aux->id : 0;\n \t\treturn 0;\n+\tcase XDP_QUERY_XSK_UMEM:\n+\t\treturn ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,\n+\t\t\t\t\t xdp->xsk.queue_id);\n+\tcase XDP_SETUP_XSK_UMEM:\n+\t\treturn ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,\n+\t\t\t\t\t xdp->xsk.queue_id);\n+\n \tdefault:\n \t\treturn -EINVAL;\n \t}\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h\nindex 3780d315b991..cf219f4e009d 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h\n@@ -23,4 +23,19 @@ void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,\n void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);\n void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);\n \n+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,\n+\t\t\t\tstruct ixgbe_ring *ring);\n+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,\n+\t\t\t u16 qid);\n+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,\n+\t\t\t u16 qid);\n+\n+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);\n+\n+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);\n+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,\n+\t\t\t struct ixgbe_ring *rx_ring,\n+\t\t\t const int budget);\n+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);\n+\n #endif /* #define _IXGBE_TXRX_COMMON_H_ */\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c\nnew file mode 100644\nindex 000000000000..253ce3cfbcf1\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c\n@@ -0,0 +1,639 @@\n+// SPDX-License-Identifier: GPL-2.0\n+/* Copyright(c) 2018 Intel Corporation. 
*/\n+\n+#include <linux/bpf_trace.h>\n+#include <net/xdp_sock.h>\n+#include <net/xdp.h>\n+\n+#include \"ixgbe.h\"\n+#include \"ixgbe_txrx_common.h\"\n+\n+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,\n+\t\t\t\tstruct ixgbe_ring *ring)\n+{\n+\tbool xdp_on = READ_ONCE(adapter->xdp_prog);\n+\tint qid = ring->queue_index;\n+\n+\tif (!adapter->xsk_umems || !adapter->xsk_umems[qid] || !xdp_on)\n+\t\treturn NULL;\n+\n+\treturn adapter->xsk_umems[qid];\n+}\n+\n+static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)\n+{\n+\tif (adapter->xsk_umems)\n+\t\treturn 0;\n+\n+\tadapter->num_xsk_umems_used = 0;\n+\tadapter->num_xsk_umems = adapter->num_rx_queues;\n+\tadapter->xsk_umems = kcalloc(adapter->num_xsk_umems,\n+\t\t\t\t sizeof(*adapter->xsk_umems),\n+\t\t\t\t GFP_KERNEL);\n+\tif (!adapter->xsk_umems) {\n+\t\tadapter->num_xsk_umems = 0;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,\n+\t\t\t struct xdp_umem *umem,\n+\t\t\t u16 qid)\n+{\n+\tint err;\n+\n+\terr = ixgbe_alloc_xsk_umems(adapter);\n+\tif (err)\n+\t\treturn err;\n+\n+\tadapter->xsk_umems[qid] = umem;\n+\tadapter->num_xsk_umems_used++;\n+\n+\treturn 0;\n+}\n+\n+static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)\n+{\n+\tadapter->xsk_umems[qid] = NULL;\n+\tadapter->num_xsk_umems_used--;\n+\n+\tif (adapter->num_xsk_umems == 0) {\n+\t\tkfree(adapter->xsk_umems);\n+\t\tadapter->xsk_umems = NULL;\n+\t\tadapter->num_xsk_umems = 0;\n+\t}\n+}\n+\n+static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,\n+\t\t\t\t struct xdp_umem *umem)\n+{\n+\tstruct device *dev = &adapter->pdev->dev;\n+\tunsigned int i, j;\n+\tdma_addr_t dma;\n+\n+\tfor (i = 0; i < umem->npgs; i++) {\n+\t\tdma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,\n+\t\t\t\t\t DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);\n+\t\tif (dma_mapping_error(dev, dma))\n+\t\t\tgoto out_unmap;\n+\n+\t\tumem->pages[i].dma = dma;\n+\t}\n+\n+\treturn 0;\n+\n+out_unmap:\n+\tfor (j = 0; j < i; j++) {\n+\t\tdma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,\n+\t\t\t\t DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);\n+\t\tumem->pages[i].dma = 0;\n+\t}\n+\n+\treturn -1;\n+}\n+\n+static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,\n+\t\t\t\t struct xdp_umem *umem)\n+{\n+\tstruct device *dev = &adapter->pdev->dev;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < umem->npgs; i++) {\n+\t\tdma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,\n+\t\t\t\t DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);\n+\n+\t\tumem->pages[i].dma = 0;\n+\t}\n+}\n+\n+static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,\n+\t\t\t\t struct xdp_umem *umem,\n+\t\t\t\t u16 qid)\n+{\n+\tstruct xdp_umem_fq_reuse *reuseq;\n+\tbool if_running;\n+\tint err;\n+\n+\tif (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->flags & IXGBE_FLAG_DCB_ENABLED)\n+\t\treturn -EINVAL;\n+\n+\tif (qid >= adapter->num_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->xsk_umems) {\n+\t\tif (qid >= adapter->num_xsk_umems)\n+\t\t\treturn -EINVAL;\n+\t\tif (adapter->xsk_umems[qid])\n+\t\t\treturn -EBUSY;\n+\t}\n+\n+\treuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);\n+\tif (!reuseq)\n+\t\treturn -ENOMEM;\n+\n+\txsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));\n+\n+\terr = ixgbe_xsk_umem_dma_map(adapter, umem);\n+\tif (err)\n+\t\treturn err;\n+\n+\tif_running = netif_running(adapter->netdev) &&\n+\t\t READ_ONCE(adapter->xdp_prog);\n+\n+\tif 
(if_running)\n+\t\tixgbe_txrx_ring_disable(adapter, qid);\n+\n+\terr = ixgbe_add_xsk_umem(adapter, umem, qid);\n+\n+\tif (if_running)\n+\t\tixgbe_txrx_ring_enable(adapter, qid);\n+\n+\treturn err;\n+}\n+\n+static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)\n+{\n+\tbool if_running;\n+\n+\tif (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||\n+\t !adapter->xsk_umems[qid])\n+\t\treturn -EINVAL;\n+\n+\tif_running = netif_running(adapter->netdev) &&\n+\t\t READ_ONCE(adapter->xdp_prog);\n+\n+\tif (if_running)\n+\t\tixgbe_txrx_ring_disable(adapter, qid);\n+\n+\tixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);\n+\tixgbe_remove_xsk_umem(adapter, qid);\n+\n+\tif (if_running)\n+\t\tixgbe_txrx_ring_enable(adapter, qid);\n+\n+\treturn 0;\n+}\n+\n+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,\n+\t\t\t u16 qid)\n+{\n+\tif (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->flags & IXGBE_FLAG_DCB_ENABLED)\n+\t\treturn -EINVAL;\n+\n+\tif (qid >= adapter->num_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->xsk_umems) {\n+\t\tif (qid >= adapter->num_xsk_umems)\n+\t\t\treturn -EINVAL;\n+\t\t*umem = adapter->xsk_umems[qid];\n+\t\treturn 0;\n+\t}\n+\n+\t*umem = NULL;\n+\treturn 0;\n+}\n+\n+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,\n+\t\t\t u16 qid)\n+{\n+\treturn umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :\n+\t\tixgbe_xsk_umem_disable(adapter, qid);\n+}\n+\n+static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,\n+\t\t\t struct ixgbe_ring *rx_ring,\n+\t\t\t struct xdp_buff *xdp)\n+{\n+\tint err, result = IXGBE_XDP_PASS;\n+\tstruct bpf_prog *xdp_prog;\n+\tstruct xdp_frame *xdpf;\n+\tu32 act;\n+\n+\trcu_read_lock();\n+\txdp_prog = READ_ONCE(rx_ring->xdp_prog);\n+\tact = bpf_prog_run_xdp(xdp_prog, xdp);\n+\txdp->handle += xdp->data - xdp->data_hard_start;\n+\tswitch (act) {\n+\tcase XDP_PASS:\n+\t\tbreak;\n+\tcase XDP_TX:\n+\t\txdpf = convert_to_xdp_frame(xdp);\n+\t\tif (unlikely(!xdpf)) {\n+\t\t\tresult = IXGBE_XDP_CONSUMED;\n+\t\t\tbreak;\n+\t\t}\n+\t\tresult = ixgbe_xmit_xdp_ring(adapter, xdpf);\n+\t\tbreak;\n+\tcase XDP_REDIRECT:\n+\t\terr = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);\n+\t\tresult = !err ? 
IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;\n+\t\tbreak;\n+\tdefault:\n+\t\tbpf_warn_invalid_xdp_action(act);\n+\t\t/* fallthrough */\n+\tcase XDP_ABORTED:\n+\t\ttrace_xdp_exception(rx_ring->netdev, xdp_prog, act);\n+\t\t/* fallthrough -- handle aborts by dropping packet */\n+\tcase XDP_DROP:\n+\t\tresult = IXGBE_XDP_CONSUMED;\n+\t\tbreak;\n+\t}\n+\trcu_read_unlock();\n+\treturn result;\n+}\n+\n+static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(\n+\tstruct ixgbe_ring *rx_ring,\n+\tunsigned int size)\n+{\n+\tstruct ixgbe_rx_buffer *bi;\n+\n+\tbi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];\n+\n+\t/* we are reusing so sync this buffer for CPU use */\n+\tdma_sync_single_range_for_cpu(rx_ring->dev,\n+\t\t\t\t bi->dma, 0,\n+\t\t\t\t size,\n+\t\t\t\t DMA_BIDIRECTIONAL);\n+\n+\treturn bi;\n+}\n+\n+static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,\n+\t\t\t\t struct ixgbe_rx_buffer *obi)\n+{\n+\tunsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;\n+\tu64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;\n+\tu16 nta = rx_ring->next_to_alloc;\n+\tstruct ixgbe_rx_buffer *nbi;\n+\n+\tnbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];\n+\t/* update, and store next to alloc */\n+\tnta++;\n+\trx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;\n+\n+\t/* transfer page from old buffer to new buffer */\n+\tnbi->dma = obi->dma & mask;\n+\tnbi->dma += hr;\n+\n+\tnbi->addr = (void *)((unsigned long)obi->addr & mask);\n+\tnbi->addr += hr;\n+\n+\tnbi->handle = obi->handle & mask;\n+\tnbi->handle += rx_ring->xsk_umem->headroom;\n+\n+\tobi->addr = NULL;\n+\tobi->skb = NULL;\n+}\n+\n+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)\n+{\n+\tstruct ixgbe_rx_buffer *bi;\n+\tstruct ixgbe_ring *rx_ring;\n+\tu64 hr, mask;\n+\tu16 nta;\n+\n+\trx_ring = container_of(alloc, struct ixgbe_ring, zca);\n+\thr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;\n+\tmask = rx_ring->xsk_umem->chunk_mask;\n+\n+\tnta = rx_ring->next_to_alloc;\n+\tbi = rx_ring->rx_buffer_info;\n+\n+\tnta++;\n+\trx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0;\n+\n+\thandle &= mask;\n+\n+\tbi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);\n+\tbi->dma += hr;\n+\n+\tbi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);\n+\tbi->addr += hr;\n+\n+\tbi->handle = (u64)handle + rx_ring->xsk_umem->headroom;\n+}\n+\n+static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,\n+\t\t\t\t struct ixgbe_rx_buffer *bi)\n+{\n+\tstruct xdp_umem *umem = rx_ring->xsk_umem;\n+\tvoid *addr = bi->addr;\n+\tu64 handle, hr;\n+\n+\tif (addr)\n+\t\treturn true;\n+\n+\tif (!xsk_umem_peek_addr(umem, &handle)) {\n+\t\trx_ring->rx_stats.alloc_rx_page_failed++;\n+\t\treturn false;\n+\t}\n+\n+\thr = umem->headroom + XDP_PACKET_HEADROOM;\n+\n+\tbi->dma = xdp_umem_get_dma(umem, handle);\n+\tbi->dma += hr;\n+\n+\tbi->addr = xdp_umem_get_data(umem, handle);\n+\tbi->addr += hr;\n+\n+\tbi->handle = handle + umem->headroom;\n+\n+\txsk_umem_discard_addr(umem);\n+\treturn true;\n+}\n+\n+static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,\n+\t\t\t\t struct ixgbe_rx_buffer *bi)\n+{\n+\tstruct xdp_umem *umem = rx_ring->xsk_umem;\n+\tu64 handle, hr;\n+\n+\tif (!xsk_umem_peek_addr_rq(umem, &handle)) {\n+\t\trx_ring->rx_stats.alloc_rx_page_failed++;\n+\t\treturn false;\n+\t}\n+\n+\thandle &= rx_ring->xsk_umem->chunk_mask;\n+\n+\thr = umem->headroom + XDP_PACKET_HEADROOM;\n+\n+\tbi->dma = xdp_umem_get_dma(umem, handle);\n+\tbi->dma += hr;\n+\n+\tbi->addr = xdp_umem_get_data(umem, handle);\n+\tbi->addr += hr;\n+\n+\tbi->handle = handle + umem->headroom;\n+\n+\txsk_umem_discard_addr_rq(umem);\n+\treturn true;\n+}\n+\n+static __always_inline bool __ixgbe_alloc_rx_buffers_zc(\n+\tstruct ixgbe_ring *rx_ring,\n+\tu16 cleaned_count,\n+\tbool alloc(struct ixgbe_ring *rx_ring,\n+\t\t struct ixgbe_rx_buffer *bi))\n+{\n+\tunion ixgbe_adv_rx_desc *rx_desc;\n+\tstruct ixgbe_rx_buffer *bi;\n+\tu16 i = rx_ring->next_to_use;\n+\tbool ok = true;\n+\n+\t/* nothing to do */\n+\tif (!cleaned_count)\n+\t\treturn true;\n+\n+\trx_desc = IXGBE_RX_DESC(rx_ring, i);\n+\tbi = &rx_ring->rx_buffer_info[i];\n+\ti -= rx_ring->count;\n+\n+\tdo {\n+\t\tif (!alloc(rx_ring, bi)) {\n+\t\t\tok = false;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* sync the buffer for use by the device */\n+\t\tdma_sync_single_range_for_device(rx_ring->dev, bi->dma,\n+\t\t\t\t\t\t bi->page_offset,\n+\t\t\t\t\t\t rx_ring->rx_buf_len,\n+\t\t\t\t\t\t DMA_BIDIRECTIONAL);\n+\n+\t\t/* Refresh the desc even if buffer_addrs didn't change\n+\t\t * because each write-back erases this info.\n+\t\t */\n+\t\trx_desc->read.pkt_addr = cpu_to_le64(bi->dma);\n+\n+\t\trx_desc++;\n+\t\tbi++;\n+\t\ti++;\n+\t\tif (unlikely(!i)) {\n+\t\t\trx_desc = IXGBE_RX_DESC(rx_ring, 0);\n+\t\t\tbi = rx_ring->rx_buffer_info;\n+\t\t\ti -= rx_ring->count;\n+\t\t}\n+\n+\t\t/* clear the length for the next_to_use descriptor */\n+\t\trx_desc->wb.upper.length = 0;\n+\n+\t\tcleaned_count--;\n+\t} while (cleaned_count);\n+\n+\ti += rx_ring->count;\n+\n+\tif (rx_ring->next_to_use != i) {\n+\t\trx_ring->next_to_use = i;\n+\n+\t\t/* update next to alloc since we have filled the ring */\n+\t\trx_ring->next_to_alloc = i;\n+\n+\t\t/* Force memory writes to complete before letting h/w\n+\t\t * know there are new descriptors to fetch. 
(Only\n+\t\t * applicable for weak-ordered memory model archs,\n+\t\t * such as IA-64).\n+\t\t */\n+\t\twmb();\n+\t\twritel(i, rx_ring->tail);\n+\t}\n+\n+\treturn ok;\n+}\n+\n+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)\n+{\n+\t__ixgbe_alloc_rx_buffers_zc(rx_ring, count,\n+\t\t\t\t ixgbe_alloc_buffer_slow_zc);\n+}\n+\n+static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,\n+\t\t\t\t\t u16 count)\n+{\n+\treturn __ixgbe_alloc_rx_buffers_zc(rx_ring, count,\n+\t\t\t\t\t ixgbe_alloc_buffer_zc);\n+}\n+\n+static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,\n+\t\t\t\t\t struct ixgbe_rx_buffer *bi,\n+\t\t\t\t\t struct xdp_buff *xdp)\n+{\n+\tunsigned int metasize = xdp->data - xdp->data_meta;\n+\tunsigned int datasize = xdp->data_end - xdp->data;\n+\tstruct sk_buff *skb;\n+\n+\t/* allocate a skb to store the frags */\n+\tskb = __napi_alloc_skb(&rx_ring->q_vector->napi,\n+\t\t\t xdp->data_end - xdp->data_hard_start,\n+\t\t\t GFP_ATOMIC | __GFP_NOWARN);\n+\tif (unlikely(!skb))\n+\t\treturn NULL;\n+\n+\tskb_reserve(skb, xdp->data - xdp->data_hard_start);\n+\tmemcpy(__skb_put(skb, datasize), xdp->data, datasize);\n+\tif (metasize)\n+\t\tskb_metadata_set(skb, metasize);\n+\n+\tixgbe_reuse_rx_buffer_zc(rx_ring, bi);\n+\treturn skb;\n+}\n+\n+static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)\n+{\n+\tu32 ntc = rx_ring->next_to_clean + 1;\n+\n+\tntc = (ntc < rx_ring->count) ? ntc : 0;\n+\trx_ring->next_to_clean = ntc;\n+\tprefetch(IXGBE_RX_DESC(rx_ring, ntc));\n+}\n+\n+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,\n+\t\t\t struct ixgbe_ring *rx_ring,\n+\t\t\t const int budget)\n+{\n+\tunsigned int total_rx_bytes = 0, total_rx_packets = 0;\n+\tstruct ixgbe_adapter *adapter = q_vector->adapter;\n+\tu16 cleaned_count = ixgbe_desc_unused(rx_ring);\n+\tunsigned int xdp_res, xdp_xmit = 0;\n+\tbool failure = false;\n+\tstruct sk_buff *skb;\n+\tstruct xdp_buff xdp;\n+\n+\txdp.rxq = &rx_ring->xdp_rxq;\n+\n+\twhile (likely(total_rx_packets < budget)) {\n+\t\tunion ixgbe_adv_rx_desc *rx_desc;\n+\t\tstruct ixgbe_rx_buffer *bi;\n+\t\tunsigned int size;\n+\n+\t\t/* return some buffers to hardware, one at a time is too slow */\n+\t\tif (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {\n+\t\t\tfailure = failure ||\n+\t\t\t\t !ixgbe_alloc_rx_buffers_fast_zc(\n+\t\t\t\t\t rx_ring,\n+\t\t\t\t\t cleaned_count);\n+\t\t\tcleaned_count = 0;\n+\t\t}\n+\n+\t\trx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);\n+\t\tsize = le16_to_cpu(rx_desc->wb.upper.length);\n+\t\tif (!size)\n+\t\t\tbreak;\n+\n+\t\t/* This memory barrier is needed to keep us from reading\n+\t\t * any other fields out of the rx_desc until we know the\n+\t\t * descriptor has been written back\n+\t\t */\n+\t\tdma_rmb();\n+\n+\t\tbi = ixgbe_get_rx_buffer_zc(rx_ring, size);\n+\n+\t\tif (unlikely(!ixgbe_test_staterr(rx_desc,\n+\t\t\t\t\t\t IXGBE_RXD_STAT_EOP))) {\n+\t\t\tstruct ixgbe_rx_buffer *next_bi;\n+\n+\t\t\tixgbe_reuse_rx_buffer_zc(rx_ring, bi);\n+\t\t\tixgbe_inc_ntc(rx_ring);\n+\t\t\tnext_bi = &rx_ring->rx_buffer_info[\n+\t\t\t\trx_ring->next_to_clean];\n+\t\t\tnext_bi->skb = ERR_PTR(-EINVAL);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tif (unlikely(bi->skb)) {\n+\t\t\tixgbe_reuse_rx_buffer_zc(rx_ring, bi);\n+\t\t\tixgbe_inc_ntc(rx_ring);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\txdp.data = bi->addr;\n+\t\txdp.data_meta = xdp.data;\n+\t\txdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;\n+\t\txdp.data_end = xdp.data + size;\n+\t\txdp.handle = bi->handle;\n+\n+\t\txdp_res = 
ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);\n+\n+\t\tif (xdp_res) {\n+\t\t\tif (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {\n+\t\t\t\txdp_xmit |= xdp_res;\n+\t\t\t\tbi->addr = NULL;\n+\t\t\t\tbi->skb = NULL;\n+\t\t\t} else {\n+\t\t\t\tixgbe_reuse_rx_buffer_zc(rx_ring, bi);\n+\t\t\t}\n+\t\t\ttotal_rx_packets++;\n+\t\t\ttotal_rx_bytes += size;\n+\n+\t\t\tcleaned_count++;\n+\t\t\tixgbe_inc_ntc(rx_ring);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* XDP_PASS path */\n+\t\tskb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);\n+\t\tif (!skb) {\n+\t\t\trx_ring->rx_stats.alloc_rx_buff_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tcleaned_count++;\n+\t\tixgbe_inc_ntc(rx_ring);\n+\n+\t\tif (eth_skb_pad(skb))\n+\t\t\tcontinue;\n+\n+\t\ttotal_rx_bytes += skb->len;\n+\t\ttotal_rx_packets++;\n+\n+\t\tixgbe_process_skb_fields(rx_ring, rx_desc, skb);\n+\t\tixgbe_rx_skb(q_vector, skb);\n+\t}\n+\n+\tif (xdp_xmit & IXGBE_XDP_REDIR)\n+\t\txdp_do_flush_map();\n+\n+\tif (xdp_xmit & IXGBE_XDP_TX) {\n+\t\tstruct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];\n+\n+\t\t/* Force memory writes to complete before letting h/w\n+\t\t * know there are new descriptors to fetch.\n+\t\t */\n+\t\twmb();\n+\t\twritel(ring->next_to_use, ring->tail);\n+\t}\n+\n+\tu64_stats_update_begin(&rx_ring->syncp);\n+\trx_ring->stats.packets += total_rx_packets;\n+\trx_ring->stats.bytes += total_rx_bytes;\n+\tu64_stats_update_end(&rx_ring->syncp);\n+\tq_vector->rx.total_packets += total_rx_packets;\n+\tq_vector->rx.total_bytes += total_rx_bytes;\n+\n+\treturn failure ? budget : (int)total_rx_packets;\n+}\n+\n+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)\n+{\n+\tu16 i = rx_ring->next_to_clean;\n+\tstruct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];\n+\n+\twhile (i != rx_ring->next_to_alloc) {\n+\t\txsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);\n+\t\ti++;\n+\t\tbi++;\n+\t\tif (i == rx_ring->count) {\n+\t\t\ti = 0;\n+\t\t\tbi = rx_ring->rx_buffer_info;\n+\t\t}\n+\t}\n+}\n", "prefixes": [ "3/5" ] }
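As a follow-up, the mbox URL returned in the response above can be used to fetch the raw patch for local application. A minimal sketch, again assuming the requests library; the output filename is arbitrary.

import requests

# "mbox" field from the response above
MBOX_URL = ("http://patchwork.ozlabs.org/project/intel-wired-lan/patch/"
            "20180924163557.1187-4-bjorn.topel@gmail.com/mbox/")

resp = requests.get(MBOX_URL)
resp.raise_for_status()

# Save the mbox; it can then be applied to a kernel tree with `git am`.
with open("ixgbe-af_xdp-zc-rx.mbox", "wb") as f:
    f.write(resp.content)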