get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
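
A minimal sketch of driving this endpoint from Python, assuming the `requests` library and a Patchwork API token; the "Authorization: Token ..." header and the writable "state" field are assumptions based on typical Django REST Framework token authentication, not confirmed by this page:

    # Read a patch, then partially update it via PATCH.
    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    TOKEN = "<your-api-token>"                      # hypothetical placeholder
    HEADERS = {"Authorization": f"Token {TOKEN}"}   # assumed auth scheme

    # GET: show a patch (reads need no authentication)
    patch = requests.get(f"{BASE}/patches/1317325/").json()
    print(patch["name"], patch["state"])

    # PATCH: partial update -- only the fields sent are changed
    resp = requests.patch(f"{BASE}/patches/1317325/",
                          headers=HEADERS,
                          json={"state": "superseded"})
    resp.raise_for_status()

The sample request and response below show the full shape of the patch object returned by GET.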

GET /api/patches/1317325/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1317325,
    "url": "http://patchwork.ozlabs.org/api/patches/1317325/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200626031633.1217160-1-jeffrey.t.kirsher@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20200626031633.1217160-1-jeffrey.t.kirsher@intel.com>",
    "list_archive_url": null,
    "date": "2020-06-26T03:16:33",
    "name": "[next-queue,v2] igb: add XDP support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "5f1734cc5c00b076496844ff3f93a33004a2c727",
    "submitter": {
        "id": 473,
        "url": "http://patchwork.ozlabs.org/api/people/473/?format=api",
        "name": "Kirsher, Jeffrey T",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20200626031633.1217160-1-jeffrey.t.kirsher@intel.com/mbox/",
    "series": [
        {
            "id": 185907,
            "url": "http://patchwork.ozlabs.org/api/series/185907/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=185907",
            "date": "2020-06-26T03:16:33",
            "name": "[next-queue,v2] igb: add XDP support",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/185907/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1317325/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1317325/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=osuosl.org\n (client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n envelope-from=intel-wired-lan-bounces@osuosl.org; receiver=<UNKNOWN>)",
            "ozlabs.org;\n dmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 49tMVg2hG3z9sRR\n\tfor <incoming@patchwork.ozlabs.org>; Fri, 26 Jun 2020 13:16:43 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id EA1B08800F;\n\tFri, 26 Jun 2020 03:16:41 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 6Tv6IlBGjcJa; Fri, 26 Jun 2020 03:16:39 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 4C8FD87FFF;\n\tFri, 26 Jun 2020 03:16:39 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n by ash.osuosl.org (Postfix) with ESMTP id 1D3801BF9BD\n for <intel-wired-lan@lists.osuosl.org>; Fri, 26 Jun 2020 03:16:38 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n by whitealder.osuosl.org (Postfix) with ESMTP id 1659285EA8\n for <intel-wired-lan@lists.osuosl.org>; Fri, 26 Jun 2020 03:16:38 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id ED7va+uFpUEE for <intel-wired-lan@lists.osuosl.org>;\n Fri, 26 Jun 2020 03:16:35 +0000 (UTC)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by whitealder.osuosl.org (Postfix) with ESMTPS id B2AD885D26\n for <intel-wired-lan@lists.osuosl.org>; Fri, 26 Jun 2020 03:16:35 +0000 (UTC)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 25 Jun 2020 20:16:35 -0700",
            "from jtkirshe-desk1.jf.intel.com ([134.134.177.86])\n by fmsmga008.fm.intel.com with ESMTP; 25 Jun 2020 20:16:35 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "IronPort-SDR": [
            "\n EXZ0iOSfggP98yOJtcwIa03MFh+uMre2HA9Ga98jucJEdRxCyj1xnujEFw98JqNcgfDo6I2dm7\n injfk6sVPHXA==",
            "\n KqsR467V1QGOmHb2MlJMmhBo5QJGikPEuQpJUjEKnGrp5ZFXyC4HcFxcwUiY/ImnDnKJ1nE3fu\n +iSMqVfH6Etg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9663\"; a=\"124822655\"",
            "E=Sophos;i=\"5.75,282,1589266800\"; d=\"scan'208\";a=\"124822655\"",
            "E=Sophos;i=\"5.75,282,1589266800\"; d=\"scan'208\";a=\"265522589\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Jeff Kirsher <jeffrey.t.kirsher@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Thu, 25 Jun 2020 20:16:33 -0700",
        "Message-Id": "<20200626031633.1217160-1-jeffrey.t.kirsher@intel.com>",
        "X-Mailer": "git-send-email 2.26.2",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [next-queue v2] igb: add XDP support",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n <intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Sven Auhagen <sven.auhagen@voleatech.de>\n\nAdd XDP support to the IGB driver.\nThe implementation follows the IXGBE XDP implementation\nclosely and I used the following patches as basis:\n\n1. commit 924708081629 (\"ixgbe: add XDP support for pass and drop actions\")\n2. commit 33fdc82f0883 (\"ixgbe: add support for XDP_TX action\")\n3. commit ed93a3987128 (\"ixgbe: tweak page counting for XDP_REDIRECT\")\n\nDue to the hardware constraints of the devices using the\nIGB driver we must share the TX queues with XDP which\nmeans locking the TX queue also for non XDP cases.\nThis comes with a small penalty ~5% in my tests.\n\nI ran tests on an older device to get better numbers.\nTest machine:\n\nIntel(R) Atom(TM) CPU C2338 @ 1.74GHz (2 Cores)\n2x Intel I211\n\nRouting Original Driver Network Stack: 382 Kpps\nRouting XDP Driver Network Stack: 364 Kpps\n\nRouting XDP Redirect (xdp_fwd_kern): 1.48 Mpps\nXDP Drop: 1.48 Mpps\n\nUsing XDP we can achieve line rate forwarding even on\non older Intel Atom CPU.\n\nSigned-off-by: Sven Auhagen <sven.auhagen@voleatech.de>\nReported-by: kernel test robot <lkp@intel.com>\n---\nv2: original did not apply to my dev-queue branch, so fixed the\n    conflicts in the patch\n\n drivers/net/ethernet/intel/igb/igb.h         |  84 +++-\n drivers/net/ethernet/intel/igb/igb_ethtool.c |   8 +-\n drivers/net/ethernet/intel/igb/igb_main.c    | 466 +++++++++++++++++--\n 3 files changed, 510 insertions(+), 48 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h\nindex 2f015b60a995..3d518cc35880 100644\n--- a/drivers/net/ethernet/intel/igb/igb.h\n+++ b/drivers/net/ethernet/intel/igb/igb.h\n@@ -19,6 +19,8 @@\n #include <linux/pci.h>\n #include <linux/mdio.h>\n \n+#include <net/xdp.h>\n+\n struct igb_adapter;\n \n #define E1000_PCS_CFG_IGN_SD\t1\n@@ -79,6 +81,12 @@ struct igb_adapter;\n #define IGB_I210_RX_LATENCY_100\t\t2213\n #define IGB_I210_RX_LATENCY_1000\t448\n \n+/* XDP */\n+#define IGB_XDP_PASS\t\t0\n+#define IGB_XDP_CONSUMED\tBIT(0)\n+#define IGB_XDP_TX\t\tBIT(1)\n+#define IGB_XDP_REDIR\t\tBIT(2)\n+\n struct vf_data_storage {\n \tunsigned char vf_mac_addresses[ETH_ALEN];\n \tu16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];\n@@ -132,17 +140,62 @@ struct vf_mac_filter {\n \n /* Supported Rx Buffer Sizes */\n #define IGB_RXBUFFER_256\t256\n+#define IGB_RXBUFFER_1536\t1536\n #define IGB_RXBUFFER_2048\t2048\n #define IGB_RXBUFFER_3072\t3072\n #define IGB_RX_HDR_LEN\t\tIGB_RXBUFFER_256\n #define IGB_TS_HDR_LEN\t\t16\n \n-#define IGB_SKB_PAD\t\t(NET_SKB_PAD + NET_IP_ALIGN)\n+/* Attempt to maximize the headroom available for incoming frames.  We\n+ * use a 2K buffer for receives and need 1536/1534 to store the data for\n+ * the frame.  This leaves us with 512 bytes of room.  From that we need\n+ * to deduct the space needed for the shared info and the padding needed\n+ * to IP align the frame.\n+ *\n+ * Note: For cache line sizes 256 or larger this value is going to end\n+ *\t up negative.  In these cases we should fall back to the 3K\n+ *\t buffers.\n+ */\n #if (PAGE_SIZE < 8192)\n-#define IGB_MAX_FRAME_BUILD_SKB \\\n-\t(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)\n+#define IGB_MAX_2K_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)\n+#define IGB_2K_TOO_SMALL_WITH_PADDING \\\n+((NET_SKB_PAD + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))\n+\n+static inline int igb_compute_pad(int rx_buf_len)\n+{\n+\tint page_size, pad_size;\n+\n+\tpage_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);\n+\tpad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;\n+\n+\treturn pad_size;\n+}\n+\n+static inline int igb_skb_pad(void)\n+{\n+\tint rx_buf_len;\n+\n+\t/* If a 2K buffer cannot handle a standard Ethernet frame then\n+\t * optimize padding for a 3K buffer instead of a 1.5K buffer.\n+\t *\n+\t * For a 3K buffer we need to add enough padding to allow for\n+\t * tailroom due to NET_IP_ALIGN possibly shifting us out of\n+\t * cache-line alignment.\n+\t */\n+\tif (IGB_2K_TOO_SMALL_WITH_PADDING)\n+\t\trx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);\n+\telse\n+\t\trx_buf_len = IGB_RXBUFFER_1536;\n+\n+\t/* if needed make room for NET_IP_ALIGN */\n+\trx_buf_len -= NET_IP_ALIGN;\n+\n+\treturn igb_compute_pad(rx_buf_len);\n+}\n+\n+#define IGB_SKB_PAD\tigb_skb_pad()\n #else\n-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)\n+#define IGB_SKB_PAD\t(NET_SKB_PAD + NET_IP_ALIGN)\n #endif\n \n /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/\n@@ -194,13 +247,22 @@ enum igb_tx_flags {\n #define IGB_SFF_ADDRESSING_MODE\t\t0x4\n #define IGB_SFF_8472_UNSUP\t\t0x00\n \n+enum igb_tx_buf_type {\n+\tIGB_TYPE_SKB = 0,\n+\tIGB_TYPE_XDP,\n+};\n+\n /* wrapper around a pointer to a socket buffer,\n  * so a DMA handle can be stored along with the buffer\n  */\n struct igb_tx_buffer {\n \tunion e1000_adv_tx_desc *next_to_watch;\n \tunsigned long time_stamp;\n-\tstruct sk_buff *skb;\n+\tenum igb_tx_buf_type type;\n+\tunion {\n+\t\tstruct sk_buff *skb;\n+\t\tstruct xdp_frame *xdpf;\n+\t};\n \tunsigned int bytecount;\n \tu16 gso_segs;\n \t__be16 protocol;\n@@ -248,6 +310,7 @@ struct igb_ring_container {\n struct igb_ring {\n \tstruct igb_q_vector *q_vector;\t/* backlink to q_vector */\n \tstruct net_device *netdev;\t/* back pointer to net_device */\n+\tstruct bpf_prog *xdp_prog;\n \tstruct device *dev;\t\t/* device pointer for dma mapping */\n \tunion {\t\t\t\t/* array of buffer info structs */\n \t\tstruct igb_tx_buffer *tx_buffer_info;\n@@ -288,6 +351,9 @@ struct igb_ring {\n \t\t\tstruct u64_stats_sync rx_syncp;\n \t\t};\n \t};\n+\t/* lock for TX */\n+\tspinlock_t tx_lock;\n+\tstruct xdp_rxq_info xdp_rxq;\n } ____cacheline_internodealigned_in_smp;\n \n struct igb_q_vector {\n@@ -339,7 +405,7 @@ static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)\n \t\treturn IGB_RXBUFFER_3072;\n \n \tif (ring_uses_build_skb(ring))\n-\t\treturn IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;\n+\t\treturn IGB_MAX_2K_FRAME_BUILD_SKB;\n #endif\n \treturn IGB_RXBUFFER_2048;\n }\n@@ -467,6 +533,7 @@ struct igb_adapter {\n \tunsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];\n \n \tstruct net_device *netdev;\n+\tstruct bpf_prog *xdp_prog;\n \n \tunsigned long state;\n \tunsigned int flags;\n@@ -643,6 +710,9 @@ enum igb_boards {\n \n extern char igb_driver_name[];\n \n+int igb_xmit_xdp_ring(struct igb_adapter *adapter,\n+\t\t      struct igb_ring *ring,\n+\t\t      struct xdp_frame *xdpf);\n int igb_open(struct net_device *netdev);\n int igb_close(struct net_device *netdev);\n int igb_up(struct igb_adapter *);\n@@ -653,7 +723,7 @@ int igb_reinit_queues(struct igb_adapter *);\n void igb_write_rss_indir_tbl(struct igb_adapter *);\n int igb_set_spd_dplx(struct igb_adapter *, u32, u8);\n int igb_setup_tx_resources(struct igb_ring *);\n-int igb_setup_rx_resources(struct igb_ring *);\n+int igb_setup_rx_resources(struct igb_ring *, struct igb_adapter *);\n void igb_free_tx_resources(struct igb_ring *);\n void igb_free_rx_resources(struct igb_ring *);\n void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);\ndiff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c\nindex c2cf414d126b..66c3086ed475 100644\n--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c\n+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c\n@@ -961,8 +961,12 @@ static int igb_set_ringparam(struct net_device *netdev,\n \t\t\tmemcpy(&temp_ring[i], adapter->rx_ring[i],\n \t\t\t       sizeof(struct igb_ring));\n \n+\t\t\t/* Clear copied XDP RX-queue info */\n+\t\t\tmemset(&temp_ring[i].xdp_rxq, 0,\n+\t\t\t       sizeof(temp_ring[i].xdp_rxq));\n+\n \t\t\ttemp_ring[i].count = new_rx_count;\n-\t\t\terr = igb_setup_rx_resources(&temp_ring[i]);\n+\t\t\terr = igb_setup_rx_resources(&temp_ring[i], adapter);\n \t\t\tif (err) {\n \t\t\t\twhile (i) {\n \t\t\t\t\ti--;\n@@ -1577,7 +1581,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)\n \trx_ring->netdev = adapter->netdev;\n \trx_ring->reg_idx = adapter->vfs_allocated_count;\n 
\n-\tif (igb_setup_rx_resources(rx_ring)) {\n+\tif (igb_setup_rx_resources(rx_ring, adapter)) {\n \t\tret_val = 3;\n \t\tgoto err_nomem;\n \t}\ndiff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c\nindex ae8d64324619..e003245effae 100644\n--- a/drivers/net/ethernet/intel/igb/igb_main.c\n+++ b/drivers/net/ethernet/intel/igb/igb_main.c\n@@ -30,6 +30,8 @@\n #include <linux/if_ether.h>\n #include <linux/aer.h>\n #include <linux/prefetch.h>\n+#include <linux/bpf.h>\n+#include <linux/bpf_trace.h>\n #include <linux/pm_runtime.h>\n #include <linux/etherdevice.h>\n #ifdef CONFIG_IGB_DCA\n@@ -2825,6 +2827,147 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,\n \t}\n }\n \n+static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)\n+{\n+\tint i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;\n+\tstruct igb_adapter *adapter = netdev_priv(dev);\n+\tstruct bpf_prog *old_prog;\n+\tbool running = netif_running(dev);\n+\tbool need_reset;\n+\n+\t/* verify igb ring attributes are sufficient for XDP */\n+\tfor (i = 0; i < adapter->num_rx_queues; i++) {\n+\t\tstruct igb_ring *ring = adapter->rx_ring[i];\n+\n+\t\tif (frame_size > igb_rx_bufsz(ring))\n+\t\t\treturn -EINVAL;\n+\t}\n+\n+\told_prog = xchg(&adapter->xdp_prog, prog);\n+\tneed_reset = (!!prog != !!old_prog);\n+\n+\t/* device is up and bpf is added/removed, must setup the RX queues */\n+\tif (need_reset && running) {\n+\t\tigb_close(dev);\n+\t} else {\n+\t\tfor (i = 0; i < adapter->num_rx_queues; i++)\n+\t\t\t(void)xchg(&adapter->rx_ring[i]->xdp_prog,\n+\t\t\t    adapter->xdp_prog);\n+\t}\n+\n+\tif (old_prog)\n+\t\tbpf_prog_put(old_prog);\n+\n+\t/* bpf is just replaced, RXQ and MTU are already setup */\n+\tif (!need_reset)\n+\t\treturn 0;\n+\n+\tif (running)\n+\t\tigb_open(dev);\n+\n+\treturn 0;\n+}\n+\n+static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)\n+{\n+\tstruct igb_adapter *adapter = netdev_priv(dev);\n+\n+\tswitch (xdp->command) {\n+\tcase XDP_SETUP_PROG:\n+\t\treturn igb_xdp_setup(dev, xdp->prog);\n+\tcase XDP_QUERY_PROG:\n+\t\txdp->prog_id = adapter->xdp_prog ?\n+\t\t\tadapter->xdp_prog->aux->id : 0;\n+\t\treturn 0;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+}\n+\n+void igb_xdp_ring_update_tail(struct igb_ring *ring)\n+{\n+\t/* Force memory writes to complete before letting h/w know there\n+\t * are new descriptors to fetch.\n+\t */\n+\twmb();\n+\twritel(ring->next_to_use, ring->tail);\n+}\n+\n+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)\n+{\n+\tunsigned int r_idx = smp_processor_id();\n+\n+\tif (r_idx >= adapter->num_tx_queues)\n+\t\tr_idx = r_idx % adapter->num_tx_queues;\n+\n+\treturn adapter->tx_ring[r_idx];\n+}\n+\n+static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)\n+{\n+\tstruct igb_ring *tx_ring;\n+\tstruct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);\n+\tu32 ret;\n+\n+\tif (unlikely(!xdpf))\n+\t\treturn IGB_XDP_CONSUMED;\n+\n+\t/* During program transitions its possible adapter->xdp_prog is assigned\n+\t * but ring has not been configured yet. In this case simply abort xmit.\n+\t */\n+\ttx_ring = adapter->xdp_prog ? 
igb_xdp_tx_queue_mapping(adapter) : NULL;\n+\tif (unlikely(!tx_ring))\n+\t\treturn -ENXIO;\n+\n+\tspin_lock(&tx_ring->tx_lock);\n+\tret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);\n+\tspin_unlock(&tx_ring->tx_lock);\n+\n+\treturn ret;\n+}\n+\n+static int igb_xdp_xmit(struct net_device *dev, int n,\n+\t\t\tstruct xdp_frame **frames, u32 flags)\n+{\n+\tstruct igb_adapter *adapter = netdev_priv(dev);\n+\tstruct igb_ring *tx_ring;\n+\tint drops = 0;\n+\tint i;\n+\n+\tif (unlikely(test_bit(__IGB_DOWN, &adapter->state)))\n+\t\treturn -ENETDOWN;\n+\n+\tif (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))\n+\t\treturn -EINVAL;\n+\n+\t/* During program transitions its possible adapter->xdp_prog is assigned\n+\t * but ring has not been configured yet. In this case simply abort xmit.\n+\t */\n+\ttx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;\n+\tif (unlikely(!tx_ring))\n+\t\treturn -ENXIO;\n+\n+\tspin_lock(&tx_ring->tx_lock);\n+\n+\tfor (i = 0; i < n; i++) {\n+\t\tstruct xdp_frame *xdpf = frames[i];\n+\t\tint err;\n+\n+\t\terr = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);\n+\t\tif (err != IGB_XDP_TX) {\n+\t\t\txdp_return_frame_rx_napi(xdpf);\n+\t\t\tdrops++;\n+\t\t}\n+\t}\n+\n+\tspin_unlock(&tx_ring->tx_lock);\n+\n+\tif (unlikely(flags & XDP_XMIT_FLUSH))\n+\t\tigb_xdp_ring_update_tail(tx_ring);\n+\n+\treturn n - drops;\n+}\n+\n static const struct net_device_ops igb_netdev_ops = {\n \t.ndo_open\t\t= igb_open,\n \t.ndo_stop\t\t= igb_close,\n@@ -2849,6 +2992,8 @@ static const struct net_device_ops igb_netdev_ops = {\n \t.ndo_fdb_add\t\t= igb_ndo_fdb_add,\n \t.ndo_features_check\t= igb_features_check,\n \t.ndo_setup_tc\t\t= igb_setup_tc,\n+\t.ndo_bpf\t\t= igb_xdp,\n+\t.ndo_xdp_xmit\t\t= igb_xdp_xmit,\n };\n \n /**\n@@ -4051,6 +4196,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)\n \ttx_ring->next_to_use = 0;\n \ttx_ring->next_to_clean = 0;\n \n+\tspin_lock_init(&tx_ring->tx_lock);\n+\n \treturn 0;\n \n err:\n@@ -4177,7 +4324,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)\n  *\n  *  Returns 0 on success, negative on failure\n  **/\n-int igb_setup_rx_resources(struct igb_ring *rx_ring)\n+int igb_setup_rx_resources(struct igb_ring *rx_ring, struct igb_adapter *adapter)\n {\n \tstruct device *dev = rx_ring->dev;\n \tint size;\n@@ -4201,6 +4348,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)\n \trx_ring->next_to_clean = 0;\n \trx_ring->next_to_use = 0;\n \n+\trx_ring->xdp_prog = adapter->xdp_prog;\n+\n+\t/* XDP RX-queue info */\n+\tif (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,\n+\t\t\t     rx_ring->queue_index) < 0)\n+\t\tgoto err;\n+\n \treturn 0;\n \n err:\n@@ -4223,7 +4377,7 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)\n \tint i, err = 0;\n \n \tfor (i = 0; i < adapter->num_rx_queues; i++) {\n-\t\terr = igb_setup_rx_resources(adapter->rx_ring[i]);\n+\t\terr = igb_setup_rx_resources(adapter->rx_ring[i], adapter);\n \t\tif (err) {\n \t\t\tdev_err(&pdev->dev,\n \t\t\t\t\"Allocation for Rx Queue %u failed\\n\", i);\n@@ -4505,6 +4659,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,\n \tint reg_idx = ring->reg_idx;\n \tu32 rxdctl = 0;\n \n+\txdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);\n+\tWARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,\n+\t\t\t\t\t   MEM_TYPE_PAGE_SHARED, NULL));\n+\n \t/* disable the queue */\n \twr32(E1000_RXDCTL(reg_idx), 0);\n \n@@ -4556,7 +4714,7 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,\n \tset_ring_build_skb_enabled(rx_ring);\n \n #if (PAGE_SIZE < 
8192)\n-\tif (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)\n+\tif (adapter->max_frame_size <= IGB_MAX_2K_FRAME_BUILD_SKB)\n \t\treturn;\n \n \tset_ring_uses_large_buffer(rx_ring);\n@@ -4709,6 +4867,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)\n {\n \tigb_clean_rx_ring(rx_ring);\n \n+\trx_ring->xdp_prog = NULL;\n+\txdp_rxq_info_unreg(&rx_ring->xdp_rxq);\n \tvfree(rx_ring->rx_buffer_info);\n \trx_ring->rx_buffer_info = NULL;\n \n@@ -5054,8 +5214,8 @@ static void igb_set_rx_mode(struct net_device *netdev)\n \n #if (PAGE_SIZE < 8192)\n \tif (!adapter->vfs_allocated_count) {\n-\t\tif (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)\n-\t\t\trlpml = IGB_MAX_FRAME_BUILD_SKB;\n+\t\tif (adapter->max_frame_size <= IGB_MAX_2K_FRAME_BUILD_SKB)\n+\t\t\trlpml = IGB_MAX_2K_FRAME_BUILD_SKB;\n \t}\n #endif\n \twr32(E1000_RLPML, rlpml);\n@@ -5077,8 +5237,8 @@ static void igb_set_rx_mode(struct net_device *netdev)\n \t/* enable Rx jumbo frames, restrict as needed to support build_skb */\n \tvmolr &= ~E1000_VMOLR_RLPML_MASK;\n #if (PAGE_SIZE < 8192)\n-\tif (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)\n-\t\tvmolr |= IGB_MAX_FRAME_BUILD_SKB;\n+\tif (adapter->max_frame_size <= IGB_MAX_2K_FRAME_BUILD_SKB)\n+\t\tvmolr |= IGB_MAX_2K_FRAME_BUILD_SKB;\n \telse\n #endif\n \t\tvmolr |= MAX_JUMBO_FRAME_SIZE;\n@@ -6078,6 +6238,80 @@ static int igb_tx_map(struct igb_ring *tx_ring,\n \treturn -1;\n }\n \n+int igb_xmit_xdp_ring(struct igb_adapter *adapter,\n+\t\t      struct igb_ring *tx_ring,\n+\t\t      struct xdp_frame *xdpf)\n+{\n+\tstruct igb_tx_buffer *tx_buffer;\n+\tunion e1000_adv_tx_desc *tx_desc;\n+\tu32 len, cmd_type, olinfo_status;\n+\tdma_addr_t dma;\n+\tu16 i;\n+\n+\tlen = xdpf->len;\n+\n+\tif (unlikely(!igb_desc_unused(tx_ring)))\n+\t\treturn IGB_XDP_CONSUMED;\n+\n+\tdma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);\n+\tif (dma_mapping_error(tx_ring->dev, dma))\n+\t\treturn IGB_XDP_CONSUMED;\n+\n+\t/* record the location of the first descriptor for this packet */\n+\ttx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];\n+\ttx_buffer->bytecount = len;\n+\ttx_buffer->gso_segs = 1;\n+\ttx_buffer->protocol = 0;\n+\n+\ti = tx_ring->next_to_use;\n+\ttx_desc = IGB_TX_DESC(tx_ring, i);\n+\n+\tdma_unmap_len_set(tx_buffer, len, len);\n+\tdma_unmap_addr_set(tx_buffer, dma, dma);\n+\ttx_buffer->type = IGB_TYPE_XDP;\n+\ttx_buffer->xdpf = xdpf;\n+\n+\ttx_desc->read.buffer_addr = cpu_to_le64(dma);\n+\n+\t/* put descriptor type bits */\n+\tcmd_type = E1000_ADVTXD_DTYP_DATA |\n+\t\t   E1000_ADVTXD_DCMD_DEXT |\n+\t\t   E1000_ADVTXD_DCMD_IFCS;\n+\tcmd_type |= len | IGB_TXD_DCMD;\n+\ttx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);\n+\n+\tolinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);\n+\t/* 82575 requires a unique index per ring */\n+\tif (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))\n+\t\tolinfo_status |= tx_ring->reg_idx << 4;\n+\n+\ttx_desc->read.olinfo_status = olinfo_status;\n+\n+\tnetdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);\n+\n+\t/* set the timestamp */\n+\ttx_buffer->time_stamp = jiffies;\n+\n+\t/* Avoid any potential race with xdp_xmit and cleanup */\n+\tsmp_wmb();\n+\n+\t/* set next_to_watch value indicating a packet is present */\n+\ti++;\n+\tif (i == tx_ring->count)\n+\t\ti = 0;\n+\n+\ttx_buffer->next_to_watch = tx_desc;\n+\ttx_ring->next_to_use = i;\n+\n+\t/* Make sure there is space in the ring for the next send. 
*/\n+\tigb_maybe_stop_tx(tx_ring, DESC_NEEDED);\n+\n+\tif (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())\n+\t\twritel(i, tx_ring->tail);\n+\n+\treturn IGB_XDP_TX;\n+}\n+\n netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,\n \t\t\t\tstruct igb_ring *tx_ring)\n {\n@@ -6089,6 +6323,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,\n \t__be16 protocol = vlan_get_protocol(skb);\n \tu8 hdr_len = 0;\n \n+\tspin_lock_bh(&tx_ring->tx_lock);\n+\n \t/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,\n \t *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,\n \t *       + 2 desc gap to keep tail from touching head,\n@@ -6101,11 +6337,13 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,\n \n \tif (igb_maybe_stop_tx(tx_ring, count + 3)) {\n \t\t/* this is a hard error */\n+\t\tspin_unlock_bh(&tx_ring->tx_lock);\n \t\treturn NETDEV_TX_BUSY;\n \t}\n \n \t/* record the location of the first descriptor for this packet */\n \tfirst = &tx_ring->tx_buffer_info[tx_ring->next_to_use];\n+\tfirst->type = IGB_TYPE_SKB;\n \tfirst->skb = skb;\n \tfirst->bytecount = skb->len;\n \tfirst->gso_segs = 1;\n@@ -6146,6 +6384,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,\n \tif (igb_tx_map(tx_ring, first, hdr_len))\n \t\tgoto cleanup_tx_tstamp;\n \n+\tspin_unlock_bh(&tx_ring->tx_lock);\n+\n \treturn NETDEV_TX_OK;\n \n out_drop:\n@@ -6162,6 +6402,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,\n \t\tclear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);\n \t}\n \n+\tspin_unlock_bh(&tx_ring->tx_lock);\n+\n \treturn NETDEV_TX_OK;\n }\n \n@@ -6248,6 +6490,21 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)\n \tstruct igb_adapter *adapter = netdev_priv(netdev);\n \tint max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;\n \n+\tif (adapter->xdp_prog) {\n+\t\tint new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +\n+\t\t\t\t     VLAN_HLEN;\n+\t\tint i;\n+\n+\t\tfor (i = 0; i < adapter->num_rx_queues; i++) {\n+\t\t\tstruct igb_ring *ring = adapter->rx_ring[i];\n+\n+\t\t\tif (new_frame_size > igb_rx_bufsz(ring)) {\n+\t\t\t\tnetdev_warn(adapter->netdev, \"Requested MTU size is not supported with XDP\\n\");\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n \t/* adjust max frame to be at least the size of a standard frame */\n \tif (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))\n \t\tmax_frame = ETH_FRAME_LEN + ETH_FCS_LEN;\n@@ -7775,6 +8032,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)\n \tif (test_bit(__IGB_DOWN, &adapter->state))\n \t\treturn true;\n \n+\tspin_lock(&tx_ring->tx_lock);\n+\n \ttx_buffer = &tx_ring->tx_buffer_info[i];\n \ttx_desc = IGB_TX_DESC(tx_ring, i);\n \ti -= tx_ring->count;\n@@ -7801,7 +8060,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)\n \t\ttotal_packets += tx_buffer->gso_segs;\n \n \t\t/* free the skb */\n-\t\tnapi_consume_skb(tx_buffer->skb, napi_budget);\n+\t\tif (tx_buffer->type == IGB_TYPE_XDP)\n+\t\t\txdp_return_frame(tx_buffer->xdpf);\n+\t\telse\n+\t\t\tnapi_consume_skb(tx_buffer->skb, napi_budget);\n \n \t\t/* unmap skb header data */\n \t\tdma_unmap_single(tx_ring->dev,\n@@ -7854,6 +8116,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)\n \t\t\t\t  total_packets, total_bytes);\n \ti += tx_ring->count;\n \ttx_ring->next_to_clean = i;\n+\n+\tspin_unlock(&tx_ring->tx_lock);\n+\n \tu64_stats_update_begin(&tx_ring->tx_syncp);\n \ttx_ring->tx_stats.bytes += total_bytes;\n \ttx_ring->tx_stats.packets 
+= total_packets;\n@@ -7985,8 +8250,8 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)\n \t * the pagecnt_bias and page count so that we fully restock the\n \t * number of references the driver holds.\n \t */\n-\tif (unlikely(!pagecnt_bias)) {\n-\t\tpage_ref_add(page, USHRT_MAX);\n+\tif (unlikely(pagecnt_bias == 1)) {\n+\t\tpage_ref_add(page, USHRT_MAX - 1);\n \t\trx_buffer->pagecnt_bias = USHRT_MAX;\n \t}\n \n@@ -8025,22 +8290,23 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,\n \n static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,\n \t\t\t\t\t struct igb_rx_buffer *rx_buffer,\n-\t\t\t\t\t union e1000_adv_rx_desc *rx_desc,\n-\t\t\t\t\t unsigned int size)\n+\t\t\t\t\t struct xdp_buff *xdp,\n+\t\t\t\t\t union e1000_adv_rx_desc *rx_desc)\n {\n-\tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n+\tunsigned int size = xdp->data_end - xdp->data;\n #if (PAGE_SIZE < 8192)\n \tunsigned int truesize = igb_rx_pg_size(rx_ring) / 2;\n #else\n-\tunsigned int truesize = SKB_DATA_ALIGN(size);\n+\tunsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -\n+\t\t\t\t\t       xdp->data_hard_start);\n #endif\n \tunsigned int headlen;\n \tstruct sk_buff *skb;\n \n \t/* prefetch first cache line of first page */\n-\tprefetch(va);\n+\tprefetch(xdp->data);\n #if L1_CACHE_BYTES < 128\n-\tprefetch(va + L1_CACHE_BYTES);\n+\tprefetch(xdp->data + L1_CACHE_BYTES);\n #endif\n \n \t/* allocate a skb to store the frags */\n@@ -8049,24 +8315,24 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,\n \t\treturn NULL;\n \n \tif (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {\n-\t\tigb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);\n-\t\tva += IGB_TS_HDR_LEN;\n+\t\tigb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);\n+\t\txdp->data += IGB_TS_HDR_LEN;\n \t\tsize -= IGB_TS_HDR_LEN;\n \t}\n \n \t/* Determine available headroom for copy */\n \theadlen = size;\n \tif (headlen > IGB_RX_HDR_LEN)\n-\t\theadlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);\n+\t\theadlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);\n \n \t/* align pull length to size of long to optimize memcpy performance */\n-\tmemcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));\n+\tmemcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));\n \n \t/* update all of the pointers */\n \tsize -= headlen;\n \tif (size) {\n \t\tskb_add_rx_frag(skb, 0, rx_buffer->page,\n-\t\t\t\t(va + headlen) - page_address(rx_buffer->page),\n+\t\t\t\t(xdp->data + headlen) - page_address(rx_buffer->page),\n \t\t\t\tsize, truesize);\n #if (PAGE_SIZE < 8192)\n \t\trx_buffer->page_offset ^= truesize;\n@@ -8082,32 +8348,32 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,\n \n static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,\n \t\t\t\t     struct igb_rx_buffer *rx_buffer,\n-\t\t\t\t     union e1000_adv_rx_desc *rx_desc,\n-\t\t\t\t     unsigned int size)\n+\t\t\t\t     struct xdp_buff *xdp,\n+\t\t\t\t     union e1000_adv_rx_desc *rx_desc)\n {\n-\tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n \tunsigned int truesize = igb_rx_pg_size(rx_ring) / 2;\n #else\n \tunsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +\n-\t\t\t\tSKB_DATA_ALIGN(IGB_SKB_PAD + size);\n+\t\t\t\tSKB_DATA_ALIGN(xdp->data_end -\n+\t\t\t\t\t       xdp->data_hard_start);\n #endif\n \tstruct sk_buff *skb;\n \n \t/* prefetch first cache line of first page 
*/\n-\tprefetch(va);\n+\tprefetch(xdp->data_meta);\n #if L1_CACHE_BYTES < 128\n-\tprefetch(va + L1_CACHE_BYTES);\n+\tprefetch(xdp->data_meta + L1_CACHE_BYTES);\n #endif\n \n \t/* build an skb around the page buffer */\n-\tskb = build_skb(va - IGB_SKB_PAD, truesize);\n+\tskb = build_skb(xdp->data_hard_start, truesize);\n \tif (unlikely(!skb))\n \t\treturn NULL;\n \n \t/* update pointers within the skb to store the data */\n-\tskb_reserve(skb, IGB_SKB_PAD);\n-\t__skb_put(skb, size);\n+\tskb_reserve(skb, xdp->data - xdp->data_hard_start);\n+\t__skb_put(skb, xdp->data_end - xdp->data);\n \n \t/* pull timestamp out of packet data */\n \tif (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {\n@@ -8125,6 +8391,79 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,\n \treturn skb;\n }\n \n+static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,\n+\t\t\t\t   struct igb_ring *rx_ring,\n+\t\t\t\t   struct xdp_buff *xdp)\n+{\n+\tint err, result = IGB_XDP_PASS;\n+\tstruct bpf_prog *xdp_prog;\n+\tu32 act;\n+\n+\trcu_read_lock();\n+\txdp_prog = READ_ONCE(rx_ring->xdp_prog);\n+\n+\tif (!xdp_prog)\n+\t\tgoto xdp_out;\n+\n+\tprefetchw(xdp->data_hard_start); /* xdp_frame write */\n+\n+\tact = bpf_prog_run_xdp(xdp_prog, xdp);\n+\tswitch (act) {\n+\tcase XDP_PASS:\n+\t\tbreak;\n+\tcase XDP_TX:\n+\t\tresult = igb_xdp_xmit_back(adapter, xdp);\n+\t\tbreak;\n+\tcase XDP_REDIRECT:\n+\t\terr = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);\n+\t\tif (!err)\n+\t\t\tresult = IGB_XDP_REDIR;\n+\t\telse\n+\t\t\tresult = IGB_XDP_CONSUMED;\n+\t\tbreak;\n+\tdefault:\n+\t\tbpf_warn_invalid_xdp_action(act);\n+\t\t/* fallthrough */\n+\tcase XDP_ABORTED:\n+\t\ttrace_xdp_exception(rx_ring->netdev, xdp_prog, act);\n+\t\t/* fallthrough -- handle aborts by dropping packet */\n+\tcase XDP_DROP:\n+\t\tresult = IGB_XDP_CONSUMED;\n+\t\tbreak;\n+\t}\n+xdp_out:\n+\trcu_read_unlock();\n+\treturn ERR_PTR(-result);\n+}\n+\n+static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,\n+\t\t\t\t\t  unsigned int size)\n+{\n+\tunsigned int truesize;\n+\n+#if (PAGE_SIZE < 8192)\n+\ttruesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */\n+#else\n+\ttruesize = ring_uses_build_skb(rx_ring) ?\n+\t\tSKB_DATA_ALIGN(IGB_SKB_PAD + size) +\n+\t\tSKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :\n+\t\tSKB_DATA_ALIGN(size);\n+#endif\n+\treturn truesize;\n+}\n+\n+static void igb_rx_buffer_flip(struct igb_ring *rx_ring,\n+\t\t\t       struct igb_rx_buffer *rx_buffer,\n+\t\t\t       unsigned int size)\n+{\n+\tunsigned int truesize = igb_rx_frame_truesize(rx_ring, size);\n+#if (PAGE_SIZE < 8192)\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\trx_buffer->page_offset += truesize;\n+#endif\n+}\n+\n static inline void igb_rx_checksum(struct igb_ring *ring,\n \t\t\t\t   union e1000_adv_rx_desc *rx_desc,\n \t\t\t\t   struct sk_buff *skb)\n@@ -8221,6 +8560,10 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,\n \t\t\t\tunion e1000_adv_rx_desc *rx_desc,\n \t\t\t\tstruct sk_buff *skb)\n {\n+\t/* XDP packets use error pointer so abort at this point */\n+\tif (IS_ERR(skb))\n+\t\treturn true;\n+\n \tif (unlikely((igb_test_staterr(rx_desc,\n \t\t\t\t       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {\n \t\tstruct net_device *netdev = rx_ring->netdev;\n@@ -8279,6 +8622,11 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,\n \tskb->protocol = eth_type_trans(skb, rx_ring->netdev);\n }\n \n+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)\n+{\n+\treturn ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0;\n+}\n+\n static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,\n \t\t\t\t\t       const unsigned int size)\n {\n@@ -8323,9 +8671,19 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,\n static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n {\n \tstruct igb_ring *rx_ring = q_vector->rx.ring;\n+\tstruct igb_adapter *adapter = q_vector->adapter;\n \tstruct sk_buff *skb = rx_ring->skb;\n \tunsigned int total_bytes = 0, total_packets = 0;\n+\tunsigned int xdp_xmit = 0;\n \tu16 cleaned_count = igb_desc_unused(rx_ring);\n+\tstruct xdp_buff xdp;\n+\n+\txdp.rxq = &rx_ring->xdp_rxq;\n+\n+\t/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */\n+#if (PAGE_SIZE < 8192)\n+\txdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);\n+#endif\n \n \twhile (likely(total_packets < budget)) {\n \t\tunion e1000_adv_rx_desc *rx_desc;\n@@ -8352,13 +8710,38 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n \t\trx_buffer = igb_get_rx_buffer(rx_ring, size);\n \n \t\t/* retrieve a buffer from the ring */\n-\t\tif (skb)\n+\t\tif (!skb) {\n+\t\t\txdp.data = page_address(rx_buffer->page) +\n+\t\t\t\t   rx_buffer->page_offset;\n+\t\t\txdp.data_meta = xdp.data;\n+\t\t\txdp.data_hard_start = xdp.data -\n+\t\t\t\t\t      igb_rx_offset(rx_ring);\n+\t\t\txdp.data_end = xdp.data + size;\n+#if (PAGE_SIZE > 4096)\n+\t\t\t/* At larger PAGE_SIZE, frame_sz depend on len size */\n+\t\t\txdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);\n+#endif\n+\t\t\tskb = igb_run_xdp(adapter, rx_ring, &xdp);\n+\t\t}\n+\n+\t\tif (IS_ERR(skb)) {\n+\t\t\tunsigned int xdp_res = -PTR_ERR(skb);\n+\n+\t\t\tif (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {\n+\t\t\t\txdp_xmit |= xdp_res;\n+\t\t\t\tigb_rx_buffer_flip(rx_ring, rx_buffer, size);\n+\t\t\t} else {\n+\t\t\t\trx_buffer->pagecnt_bias++;\n+\t\t\t}\n+\t\t\ttotal_packets++;\n+\t\t\ttotal_bytes += size;\n+\t\t} else if (skb)\n \t\t\tigb_add_rx_frag(rx_ring, rx_buffer, skb, size);\n \t\telse if (ring_uses_build_skb(rx_ring))\n-\t\t\tskb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);\n+\t\t\tskb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);\n \t\telse\n \t\t\tskb = igb_construct_skb(rx_ring, rx_buffer,\n-\t\t\t\t\t\trx_desc, size);\n+\t\t\t\t\t\t&xdp, rx_desc);\n \n \t\t/* exit if we failed to retrieve a buffer */\n \t\tif (!skb) {\n@@ -8398,6 +8781,15 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n \t/* place incomplete frames back on ring for completion */\n \trx_ring->skb = skb;\n \n+\tif (xdp_xmit & IGB_XDP_REDIR)\n+\t\txdp_do_flush_map();\n+\n+\tif (xdp_xmit & IGB_XDP_TX) {\n+\t\tstruct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);\n+\n+\t\tigb_xdp_ring_update_tail(tx_ring);\n+\t}\n+\n \tu64_stats_update_begin(&rx_ring->rx_syncp);\n \trx_ring->rx_stats.packets += total_packets;\n \trx_ring->rx_stats.bytes += total_bytes;\n@@ -8411,11 +8803,6 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n \treturn total_packets;\n }\n \n-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)\n-{\n-\treturn ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0;\n-}\n-\n static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,\n \t\t\t\t  struct igb_rx_buffer *bi)\n {\n@@ -8452,7 +8839,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,\n \tbi->dma = dma;\n \tbi->page = page;\n \tbi->page_offset = igb_rx_offset(rx_ring);\n-\tbi->pagecnt_bias = 1;\n+\tpage_ref_add(page, USHRT_MAX - 1);\n+\tbi->pagecnt_bias = USHRT_MAX;\n \n \treturn true;\n }\n",
    "prefixes": [
        "next-queue",
        "v2"
    ]
}
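
The response above links to related resources (mbox, comments, checks, series). A short sketch of following those links, again assuming `requests`; the per-check field names ("context", "state") are assumptions about the checks sub-resource and are accessed defensively:

    # Follow links in the patch JSON shown above.
    import requests

    patch = requests.get(
        "http://patchwork.ozlabs.org/api/patches/1317325/").json()

    # Raw patch email, suitable for feeding to `git am`
    mbox = requests.get(patch["mbox"]).text

    # CI checks attached to this patch ("check" above summarises them)
    checks = requests.get(patch["checks"]).json()
    for check in checks:
        print(check.get("context"), check.get("state"))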