get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/743054/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 743054,
    "url": "http://patchwork.ozlabs.org/api/patches/743054/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170324043145.10293.75036.stgit@john-Precision-Tower-5810/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20170324043145.10293.75036.stgit@john-Precision-Tower-5810>",
    "list_archive_url": null,
    "date": "2017-03-24T04:31:45",
    "name": "[v7,2/2] ixgbe: add support for XDP_TX action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "8ca1848c61a2c5b2c2aa38d9ab64c95a399780aa",
    "submitter": {
        "id": 20028,
        "url": "http://patchwork.ozlabs.org/api/people/20028/?format=api",
        "name": "John Fastabend",
        "email": "john.fastabend@gmail.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170324043145.10293.75036.stgit@john-Precision-Tower-5810/mbox/",
    "series": [],
    "comments": "http://patchwork.ozlabs.org/api/patches/743054/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/743054/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Received": [
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3vq9W74jPTz9s7s\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 24 Mar 2017 15:32:19 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 375918905A;\n\tFri, 24 Mar 2017 04:32:18 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id SLHi1UcnVIcW; Fri, 24 Mar 2017 04:32:14 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 6AB8888F2E;\n\tFri, 24 Mar 2017 04:32:14 +0000 (UTC)",
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 856711C0586\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri, 24 Mar 2017 04:32:12 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 80851899B8\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri, 24 Mar 2017 04:32:12 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 0MZj4XAWjj6i for <intel-wired-lan@lists.osuosl.org>;\n\tFri, 24 Mar 2017 04:32:10 +0000 (UTC)",
            "from mail-pg0-f67.google.com (mail-pg0-f67.google.com\n\t[74.125.83.67])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id AADE0899B2\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri, 24 Mar 2017 04:32:10 +0000 (UTC)",
            "by mail-pg0-f67.google.com with SMTP id 81so746323pgh.3\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 23 Mar 2017 21:32:10 -0700 (PDT)",
            "from [127.0.1.1] ([72.168.144.114])\n\tby smtp.gmail.com with ESMTPSA id\n\tt66sm1222914pfk.53.2017.03.23.21.31.59\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tThu, 23 Mar 2017 21:32:09 -0700 (PDT)"
        ],
        "Authentication-Results": "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=gmail.com header.i=@gmail.com\n\theader.b=\"MdqKRNFs\"; dkim-atps=neutral",
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n\th=from:subject:to:cc:date:message-id:in-reply-to:references\n\t:user-agent:mime-version:content-transfer-encoding;\n\tbh=H0t3y0G1aJkgo9Q/Z/iYsh3O9SgGcwtSPfJcVVjo5Gk=;\n\tb=MdqKRNFszVdtEp/+I+PGnuHDLjbpzBsYmkoHcbhoPfJ+EPa+glu7xmSgMY7NbYPwn4\n\tWhMZgb39oolyrEClsHsLwEy8nKTk1pxZ3m5Ol1z9Rj6mleO6KXbQHtVceDdQ6qb/9pgZ\n\tFe3fvBkI7SytKzR7vCIhNxFgPF5iK7MQIU9wWx7UvVCadmT4CO3xdN+88lrWpEY8qunD\n\tdwq3KSjIgyTUxXXmSpIS7BoMfZWue5c8zocSmH34A41Hxbpb3hJ08t1iro5GpgRDzkha\n\txJSSGeVMArCUbl/Rz9mvVrdByddMrCu76Mjkc/sg6b7mHIMTQ7YMoVCUXFWOXBKqx2m2\n\tjNHg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:subject:to:cc:date:message-id:in-reply-to\n\t:references:user-agent:mime-version:content-transfer-encoding;\n\tbh=H0t3y0G1aJkgo9Q/Z/iYsh3O9SgGcwtSPfJcVVjo5Gk=;\n\tb=VQSo4Ty3Uu5BWijSsuqQGhLrfxYQ8SBS0Ia2cmIoIrCGxpQ0JIY3NEu/Di2CEXuRZI\n\toWzLZAQZcEjxOYyF6IMWnleN2kJhU/RQQD0hJFKMbn/fhtpAQaj7inB+yj2xJQff2n5p\n\tQ5LbNL5hGlBGVfB2ZUEnGpuHrYfyjPcdwhyXc6Hfg1QCuQNoJAct2CZugxw8NmxFGajN\n\tOopZPlnz7tYy65q/qCeHqbEH2V5WCsNRt8pNUu99M4KAFNiT67zhOaGSZXmv76GxZ39y\n\teemPvYmtlupwgMPZd+JpHsarD9FKeHV4mVbqpOlVGn2nr2B2neYlCXdEN5R9aYzV2MRA\n\tLVFQ==",
        "X-Gm-Message-State": "AFeK/H2sGmNkXnhOZqLDqnXnOAOVY5JASULytUZMWhAAnk/y1BQc3uyallB4zlMVJ0o4tQ==",
        "X-Received": "by 10.84.217.216 with SMTP id d24mr7964540plj.80.1490329929907; \n\tThu, 23 Mar 2017 21:32:09 -0700 (PDT)",
        "From": "John Fastabend <john.fastabend@gmail.com>",
        "X-Google-Original-From": "John Fastabend <john.r.fastabend@intel.com>",
        "To": "john.fastabend@gmail.com, alexander.duyck@gmail.com",
        "Date": "Thu, 23 Mar 2017 21:31:45 -0700",
        "Message-ID": "<20170324043145.10293.75036.stgit@john-Precision-Tower-5810>",
        "In-Reply-To": "<20170324042949.10293.43329.stgit@john-Precision-Tower-5810>",
        "References": "<20170324042949.10293.43329.stgit@john-Precision-Tower-5810>",
        "User-Agent": "StGit/0.17.1-dirty",
        "MIME-Version": "1.0",
        "Cc": "intel-wired-lan@lists.osuosl.org, u9012063@gmail.com",
        "Subject": "[Intel-wired-lan] [PATCH v7 2/2] ixgbe: add support for XDP_TX\n\taction",
        "X-BeenThere": "intel-wired-lan@lists.osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>",
        "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>",
        "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>"
    },
    "content": "Add support for XDP_TX action.\n\nA couple design choices were made here. First I use a new ring\npointer structure xdp_ring[] in the adapter struct instead of\npushing the newly allocated xdp TX rings into the tx_ring[]\nstructure. This means we have to duplicate loops around rings\nin places we want to initialize both TX rings and XDP rings.\nBut by making it explicit it is obvious when we are using XDP\nrings and when we are using TX rings. Further we don't have\nto do ring arithmatic which is error prone. As a proof point\nfor doing this my first patches used only a single ring structure\nand introduced bugs in FCoE code and macvlan code paths.\n\nSecond I am aware this is not the most optimized version of\nthis code possible. I want to get baseline support in using\nthe most readable format possible and then once this series\nis included I will optimize the TX path in another series\nof patches.\n\nSigned-off-by: John Fastabend <john.r.fastabend@intel.com>\n---\n drivers/net/ethernet/intel/ixgbe/ixgbe.h         |   19 +\n drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |   25 ++\n drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c     |   78 +++++-\n drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    |  279 +++++++++++++++++++---\n 4 files changed, 350 insertions(+), 51 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h\nindex cb14813..e8cd449 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h\n@@ -235,7 +235,11 @@ struct vf_macvlans {\n struct ixgbe_tx_buffer {\n \tunion ixgbe_adv_tx_desc *next_to_watch;\n \tunsigned long time_stamp;\n-\tstruct sk_buff *skb;\n+\tunion {\n+\t\tstruct sk_buff *skb;\n+\t\t/* XDP uses address ptr on irq_clean */\n+\t\tvoid *data;\n+\t};\n \tunsigned int bytecount;\n \tunsigned short gso_segs;\n \t__be16 protocol;\n@@ -288,6 +292,7 @@ enum ixgbe_ring_state_t {\n \t__IXGBE_TX_XPS_INIT_DONE,\n \t__IXGBE_TX_DETECT_HANG,\n \t__IXGBE_HANG_CHECK_ARMED,\n+\t__IXGBE_TX_XDP_RING,\n };\n \n #define ring_uses_build_skb(ring) \\\n@@ -314,6 +319,12 @@ struct ixgbe_fwd_adapter {\n \tset_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)\n #define clear_ring_rsc_enabled(ring) \\\n \tclear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)\n+#define ring_is_xdp(ring) \\\n+\ttest_bit(__IXGBE_TX_XDP_RING, &(ring)->state)\n+#define set_ring_xdp(ring) \\\n+\tset_bit(__IXGBE_TX_XDP_RING, &(ring)->state)\n+#define clear_ring_xdp(ring) \\\n+\tclear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)\n struct ixgbe_ring {\n \tstruct ixgbe_ring *next;\t/* pointer to next ring in q_vector */\n \tstruct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */\n@@ -380,6 +391,7 @@ enum ixgbe_ring_f_enum {\n #define IXGBE_MAX_FCOE_INDICES\t\t8\n #define MAX_RX_QUEUES\t\t\t(IXGBE_MAX_FDIR_INDICES + 1)\n #define MAX_TX_QUEUES\t\t\t(IXGBE_MAX_FDIR_INDICES + 1)\n+#define MAX_XDP_QUEUES\t\t\t(IXGBE_MAX_FDIR_INDICES + 1)\n #define IXGBE_MAX_L2A_QUEUES\t\t4\n #define IXGBE_BAD_L2A_QUEUE\t\t3\n #define IXGBE_MAX_MACVLANS\t\t31\n@@ -623,6 +635,10 @@ struct ixgbe_adapter {\n \t__be16 vxlan_port;\n \t__be16 geneve_port;\n \n+\t/* XDP */\n+\tint num_xdp_queues;\n+\tstruct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];\n+\n \t/* TX */\n \tstruct ixgbe_ring 
*tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;\n \n@@ -669,6 +685,7 @@ struct ixgbe_adapter {\n \n \tu64 tx_busy;\n \tunsigned int tx_ring_count;\n+\tunsigned int xdp_ring_count;\n \tunsigned int rx_ring_count;\n \n \tu32 link_speed;\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c\nindex 79a126d..b0fd2f5 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c\n@@ -1071,15 +1071,19 @@ static int ixgbe_set_ringparam(struct net_device *netdev,\n \tif (!netif_running(adapter->netdev)) {\n \t\tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\t\tadapter->tx_ring[i]->count = new_tx_count;\n+\t\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\t\tadapter->xdp_ring[i]->count = new_tx_count;\n \t\tfor (i = 0; i < adapter->num_rx_queues; i++)\n \t\t\tadapter->rx_ring[i]->count = new_rx_count;\n \t\tadapter->tx_ring_count = new_tx_count;\n+\t\tadapter->xdp_ring_count = new_tx_count;\n \t\tadapter->rx_ring_count = new_rx_count;\n \t\tgoto clear_reset;\n \t}\n \n \t/* allocate temporary buffer to store rings in */\n \ti = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);\n+\ti = max_t(int, i, adapter->num_xdp_queues);\n \ttemp_ring = vmalloc(i * sizeof(struct ixgbe_ring));\n \n \tif (!temp_ring) {\n@@ -1111,12 +1115,33 @@ static int ixgbe_set_ringparam(struct net_device *netdev,\n \t\t\t}\n \t\t}\n \n+\t\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\t\tmemcpy(&temp_ring[i], adapter->xdp_ring[i],\n+\t\t\t       sizeof(struct ixgbe_ring));\n+\n+\t\t\ttemp_ring[i].count = new_tx_count;\n+\t\t\terr = ixgbe_setup_tx_resources(&temp_ring[i]);\n+\t\t\tif (err) {\n+\t\t\t\twhile (i) {\n+\t\t\t\t\ti--;\n+\t\t\t\t\tixgbe_free_tx_resources(&temp_ring[i]);\n+\t\t\t\t}\n+\t\t\t\tgoto err_setup;\n+\t\t\t}\n+\t\t}\n+\n \t\tfor (i = 0; i < adapter->num_tx_queues; i++) {\n \t\t\tixgbe_free_tx_resources(adapter->tx_ring[i]);\n \n 
\t\t\tmemcpy(adapter->tx_ring[i], &temp_ring[i],\n \t\t\t       sizeof(struct ixgbe_ring));\n \t\t}\n+\t\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\t\tixgbe_free_tx_resources(adapter->xdp_ring[i]);\n+\n+\t\t\tmemcpy(adapter->xdp_ring[i], &temp_ring[i],\n+\t\t\t       sizeof(struct ixgbe_ring));\n+\t\t}\n \n \t\tadapter->tx_ring_count = new_tx_count;\n \t}\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c\nindex 1b8be7d..4c06d3b 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c\n@@ -267,12 +267,14 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)\n  **/\n static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)\n {\n-\tint i;\n+\tint i, reg_idx;\n \n \tfor (i = 0; i < adapter->num_rx_queues; i++)\n \t\tadapter->rx_ring[i]->reg_idx = i;\n-\tfor (i = 0; i < adapter->num_tx_queues; i++)\n-\t\tadapter->tx_ring[i]->reg_idx = i;\n+\tfor (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)\n+\t\tadapter->tx_ring[i]->reg_idx = reg_idx;\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)\n+\t\tadapter->xdp_ring[i]->reg_idx = reg_idx;\n \n \treturn true;\n }\n@@ -308,6 +310,14 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)\n \tixgbe_cache_ring_rss(adapter);\n }\n \n+static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)\n+{\n+\tif (nr_cpu_ids > MAX_XDP_QUEUES)\n+\t\treturn 0;\n+\n+\treturn adapter->xdp_prog ? 
nr_cpu_ids : 0;\n+}\n+\n #define IXGBE_RSS_64Q_MASK\t0x3F\n #define IXGBE_RSS_16Q_MASK\t0xF\n #define IXGBE_RSS_8Q_MASK\t0x7\n@@ -382,6 +392,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)\n \tadapter->num_rx_queues_per_pool = tcs;\n \n \tadapter->num_tx_queues = vmdq_i * tcs;\n+\tadapter->num_xdp_queues = 0;\n \tadapter->num_rx_queues = vmdq_i * tcs;\n \n #ifdef IXGBE_FCOE\n@@ -479,6 +490,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)\n \t\tnetdev_set_tc_queue(dev, i, rss_i, rss_i * i);\n \n \tadapter->num_tx_queues = rss_i * tcs;\n+\tadapter->num_xdp_queues = 0;\n \tadapter->num_rx_queues = rss_i * tcs;\n \n \treturn true;\n@@ -549,6 +561,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)\n \n \tadapter->num_rx_queues = vmdq_i * rss_i;\n \tadapter->num_tx_queues = vmdq_i * rss_i;\n+\tadapter->num_xdp_queues = 0;\n \n \t/* disable ATR as it is not supported when VMDq is enabled */\n \tadapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;\n@@ -669,6 +682,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)\n #endif /* IXGBE_FCOE */\n \tadapter->num_rx_queues = rss_i;\n \tadapter->num_tx_queues = rss_i;\n+\tadapter->num_xdp_queues = ixgbe_xdp_queues(adapter);\n \n \treturn true;\n }\n@@ -689,6 +703,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)\n \t/* Start with base case */\n \tadapter->num_rx_queues = 1;\n \tadapter->num_tx_queues = 1;\n+\tadapter->num_xdp_queues = 0;\n \tadapter->num_rx_pools = adapter->num_rx_queues;\n \tadapter->num_rx_queues_per_pool = 1;\n \n@@ -719,8 +734,11 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)\n \tstruct ixgbe_hw *hw = &adapter->hw;\n \tint i, vectors, vector_threshold;\n \n-\t/* We start by asking for one vector per queue pair */\n+\t/* We start by asking for one vector per queue pair with XDP queues\n+\t * being stacked with TX queues.\n+\t */\n \tvectors = max(adapter->num_rx_queues, 
adapter->num_tx_queues);\n+\tvectors = max(vectors, adapter->num_xdp_queues);\n \n \t/* It is easy to be greedy for MSI-X vectors. However, it really\n \t * doesn't do much good if we have a lot more vectors than CPUs. We'll\n@@ -800,6 +818,8 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,\n  * @v_idx: index of vector in adapter struct\n  * @txr_count: total number of Tx rings to allocate\n  * @txr_idx: index of first Tx ring to allocate\n+ * @xdp_count: total number of XDP rings to allocate\n+ * @xdp_idx: index of first XDP ring to allocate\n  * @rxr_count: total number of Rx rings to allocate\n  * @rxr_idx: index of first Rx ring to allocate\n  *\n@@ -808,6 +828,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,\n static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,\n \t\t\t\tint v_count, int v_idx,\n \t\t\t\tint txr_count, int txr_idx,\n+\t\t\t\tint xdp_count, int xdp_idx,\n \t\t\t\tint rxr_count, int rxr_idx)\n {\n \tstruct ixgbe_q_vector *q_vector;\n@@ -817,7 +838,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,\n \tint ring_count, size;\n \tu8 tcs = netdev_get_num_tc(adapter->netdev);\n \n-\tring_count = txr_count + rxr_count;\n+\tring_count = txr_count + rxr_count + xdp_count;\n \tsize = sizeof(struct ixgbe_q_vector) +\n \t       (sizeof(struct ixgbe_ring) * ring_count);\n \n@@ -909,6 +930,33 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,\n \t\tring++;\n \t}\n \n+\twhile (xdp_count) {\n+\t\t/* assign generic ring traits */\n+\t\tring->dev = &adapter->pdev->dev;\n+\t\tring->netdev = adapter->netdev;\n+\n+\t\t/* configure backlink on ring */\n+\t\tring->q_vector = q_vector;\n+\n+\t\t/* update q_vector Tx values */\n+\t\tixgbe_add_ring(ring, &q_vector->tx);\n+\n+\t\t/* apply Tx specific ring traits */\n+\t\tring->count = adapter->tx_ring_count;\n+\t\tring->queue_index = xdp_idx;\n+\t\tset_ring_xdp(ring);\n+\n+\t\t/* assign ring to adapter */\n+\t\tadapter->xdp_ring[xdp_idx] = ring;\n+\n+\t\t/* update 
count and index */\n+\t\txdp_count--;\n+\t\txdp_idx++;\n+\n+\t\t/* push pointer to next ring */\n+\t\tring++;\n+\t}\n+\n \twhile (rxr_count) {\n \t\t/* assign generic ring traits */\n \t\tring->dev = &adapter->pdev->dev;\n@@ -1002,17 +1050,18 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)\n \tint q_vectors = adapter->num_q_vectors;\n \tint rxr_remaining = adapter->num_rx_queues;\n \tint txr_remaining = adapter->num_tx_queues;\n-\tint rxr_idx = 0, txr_idx = 0, v_idx = 0;\n+\tint xdp_remaining = adapter->num_xdp_queues;\n+\tint rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;\n \tint err;\n \n \t/* only one q_vector if MSI-X is disabled. */\n \tif (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))\n \t\tq_vectors = 1;\n \n-\tif (q_vectors >= (rxr_remaining + txr_remaining)) {\n+\tif (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {\n \t\tfor (; rxr_remaining; v_idx++) {\n \t\t\terr = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,\n-\t\t\t\t\t\t   0, 0, 1, rxr_idx);\n+\t\t\t\t\t\t   0, 0, 0, 0, 1, rxr_idx);\n \n \t\t\tif (err)\n \t\t\t\tgoto err_out;\n@@ -1026,8 +1075,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)\n \tfor (; v_idx < q_vectors; v_idx++) {\n \t\tint rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);\n \t\tint tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);\n+\t\tint xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);\n+\n \t\terr = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,\n \t\t\t\t\t   tqpv, txr_idx,\n+\t\t\t\t\t   xqpv, xdp_idx,\n \t\t\t\t\t   rqpv, rxr_idx);\n \n \t\tif (err)\n@@ -1036,14 +1088,17 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)\n \t\t/* update counts and index */\n \t\trxr_remaining -= rqpv;\n \t\ttxr_remaining -= tqpv;\n+\t\txdp_remaining -= xqpv;\n \t\trxr_idx++;\n \t\ttxr_idx++;\n+\t\txdp_idx += xqpv;\n \t}\n \n \treturn 0;\n \n err_out:\n \tadapter->num_tx_queues = 0;\n+\tadapter->num_xdp_queues = 0;\n \tadapter->num_rx_queues = 0;\n 
\tadapter->num_q_vectors = 0;\n \n@@ -1066,6 +1121,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)\n \tint v_idx = adapter->num_q_vectors;\n \n \tadapter->num_tx_queues = 0;\n+\tadapter->num_xdp_queues = 0;\n \tadapter->num_rx_queues = 0;\n \tadapter->num_q_vectors = 0;\n \n@@ -1172,9 +1228,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)\n \n \tixgbe_cache_ring_register(adapter);\n \n-\te_dev_info(\"Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\\n\",\n+\te_dev_info(\"Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\\n\",\n \t\t   (adapter->num_rx_queues > 1) ? \"Enabled\" : \"Disabled\",\n-\t\t   adapter->num_rx_queues, adapter->num_tx_queues);\n+\t\t   adapter->num_rx_queues, adapter->num_tx_queues,\n+\t\t   adapter->num_xdp_queues);\n \n \tset_bit(__IXGBE_DOWN, &adapter->state);\n \n@@ -1195,6 +1252,7 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)\n void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)\n {\n \tadapter->num_tx_queues = 0;\n+\tadapter->num_xdp_queues = 0;\n \tadapter->num_rx_queues = 0;\n \n \tixgbe_free_q_vectors(adapter);\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\nindex ddb75ba..cd7eefd 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n@@ -594,6 +594,19 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)\n \n }\n \n+static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)\n+{\n+\tstruct ixgbe_tx_buffer *tx_buffer;\n+\n+\ttx_buffer = &ring->tx_buffer_info[ring->next_to_clean];\n+\tpr_info(\" %5d %5X %5X %016llX %08X %p %016llX\\n\",\n+\t\tn, ring->next_to_use, ring->next_to_clean,\n+\t\t(u64)dma_unmap_addr(tx_buffer, dma),\n+\t\tdma_unmap_len(tx_buffer, len),\n+\t\ttx_buffer->next_to_watch,\n+\t\t(u64)tx_buffer->time_stamp);\n+}\n+\n /*\n  * ixgbe_dump - Print registers, 
tx-rings and rx-rings\n  */\n@@ -603,7 +616,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)\n \tstruct ixgbe_hw *hw = &adapter->hw;\n \tstruct ixgbe_reg_info *reginfo;\n \tint n = 0;\n-\tstruct ixgbe_ring *tx_ring;\n+\tstruct ixgbe_ring *ring;\n \tstruct ixgbe_tx_buffer *tx_buffer;\n \tunion ixgbe_adv_tx_desc *tx_desc;\n \tstruct my_u0 { u64 a; u64 b; } *u0;\n@@ -644,14 +657,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)\n \t\t\"Queue [NTU] [NTC] [bi(ntc)->dma  ]\",\n \t\t\"leng\", \"ntw\", \"timestamp\");\n \tfor (n = 0; n < adapter->num_tx_queues; n++) {\n-\t\ttx_ring = adapter->tx_ring[n];\n-\t\ttx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];\n-\t\tpr_info(\" %5d %5X %5X %016llX %08X %p %016llX\\n\",\n-\t\t\t   n, tx_ring->next_to_use, tx_ring->next_to_clean,\n-\t\t\t   (u64)dma_unmap_addr(tx_buffer, dma),\n-\t\t\t   dma_unmap_len(tx_buffer, len),\n-\t\t\t   tx_buffer->next_to_watch,\n-\t\t\t   (u64)tx_buffer->time_stamp);\n+\t\tring = adapter->tx_ring[n];\n+\t\tixgbe_print_buffer(ring, n);\n+\t}\n+\n+\tfor (n = 0; n < adapter->num_xdp_queues; n++) {\n+\t\tring = adapter->xdp_ring[n];\n+\t\tixgbe_print_buffer(ring, n);\n \t}\n \n \t/* Print TX Rings */\n@@ -696,28 +708,28 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)\n \t */\n \n \tfor (n = 0; n < adapter->num_tx_queues; n++) {\n-\t\ttx_ring = adapter->tx_ring[n];\n+\t\tring = adapter->tx_ring[n];\n \t\tpr_info(\"------------------------------------\\n\");\n-\t\tpr_info(\"TX QUEUE INDEX = %d\\n\", tx_ring->queue_index);\n+\t\tpr_info(\"TX QUEUE INDEX = %d\\n\", ring->queue_index);\n \t\tpr_info(\"------------------------------------\\n\");\n \t\tpr_info(\"%s%s    %s              %s        %s          %s\\n\",\n \t\t\t\"T [desc]     [address 63:0  ] \",\n \t\t\t\"[PlPOIdStDDt Ln] [bi->dma       ] \",\n \t\t\t\"leng\", \"ntw\", \"timestamp\", \"bi->skb\");\n \n-\t\tfor (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {\n-\t\t\ttx_desc = IXGBE_TX_DESC(tx_ring, 
i);\n-\t\t\ttx_buffer = &tx_ring->tx_buffer_info[i];\n+\t\tfor (i = 0; ring->desc && (i < ring->count); i++) {\n+\t\t\ttx_desc = IXGBE_TX_DESC(ring, i);\n+\t\t\ttx_buffer = &ring->tx_buffer_info[i];\n \t\t\tu0 = (struct my_u0 *)tx_desc;\n \t\t\tif (dma_unmap_len(tx_buffer, len) > 0) {\n \t\t\t\tconst char *ring_desc;\n \n-\t\t\t\tif (i == tx_ring->next_to_use &&\n-\t\t\t\t    i == tx_ring->next_to_clean)\n+\t\t\t\tif (i == ring->next_to_use &&\n+\t\t\t\t    i == ring->next_to_clean)\n \t\t\t\t\tring_desc = \" NTC/U\";\n-\t\t\t\telse if (i == tx_ring->next_to_use)\n+\t\t\t\telse if (i == ring->next_to_use)\n \t\t\t\t\tring_desc = \" NTU\";\n-\t\t\t\telse if (i == tx_ring->next_to_clean)\n+\t\t\t\telse if (i == ring->next_to_clean)\n \t\t\t\t\tring_desc = \" NTC\";\n \t\t\t\telse\n \t\t\t\t\tring_desc = \"\";\n@@ -987,6 +999,10 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)\n \tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\tclear_bit(__IXGBE_HANG_CHECK_ARMED,\n \t\t\t  &adapter->tx_ring[i]->state);\n+\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\tclear_bit(__IXGBE_HANG_CHECK_ARMED,\n+\t\t\t  &adapter->xdp_ring[i]->state);\n }\n \n static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)\n@@ -1031,6 +1047,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)\n \t\tif (xoff[tc])\n \t\t\tclear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);\n \t}\n+\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\tstruct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];\n+\n+\t\ttc = xdp_ring->dcb_tc;\n+\t\tif (xoff[tc])\n+\t\t\tclear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);\n+\t}\n }\n \n static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)\n@@ -1182,7 +1206,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,\n \t\ttotal_packets += tx_buffer->gso_segs;\n \n \t\t/* free the skb */\n-\t\tnapi_consume_skb(tx_buffer->skb, napi_budget);\n+\t\tif 
(ring_is_xdp(tx_ring))\n+\t\t\tpage_frag_free(tx_buffer->data);\n+\t\telse\n+\t\t\tnapi_consume_skb(tx_buffer->skb, napi_budget);\n \n \t\t/* unmap skb header data */\n \t\tdma_unmap_single(tx_ring->dev,\n@@ -1243,7 +1270,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,\n \tif (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {\n \t\t/* schedule immediate reset if we believe we hung */\n \t\tstruct ixgbe_hw *hw = &adapter->hw;\n-\t\te_err(drv, \"Detected Tx Unit Hang\\n\"\n+\t\te_err(drv, \"Detected Tx Unit Hang %s\\n\"\n \t\t\t\"  Tx Queue             <%d>\\n\"\n \t\t\t\"  TDH, TDT             <%x>, <%x>\\n\"\n \t\t\t\"  next_to_use          <%x>\\n\"\n@@ -1251,13 +1278,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,\n \t\t\t\"tx_buffer_info[next_to_clean]\\n\"\n \t\t\t\"  time_stamp           <%lx>\\n\"\n \t\t\t\"  jiffies              <%lx>\\n\",\n+\t\t\tring_is_xdp(tx_ring) ? \"(XDP)\" : \"\",\n \t\t\ttx_ring->queue_index,\n \t\t\tIXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),\n \t\t\tIXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),\n \t\t\ttx_ring->next_to_use, i,\n \t\t\ttx_ring->tx_buffer_info[i].time_stamp, jiffies);\n \n-\t\tnetif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);\n+\t\tif (!ring_is_xdp(tx_ring))\n+\t\t\tnetif_stop_subqueue(tx_ring->netdev,\n+\t\t\t\t\t    tx_ring->queue_index);\n \n \t\te_info(probe,\n \t\t       \"tx hang %d detected on queue %d, resetting adapter\\n\",\n@@ -1270,6 +1300,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,\n \t\treturn true;\n \t}\n \n+\tif (ring_is_xdp(tx_ring))\n+\t\treturn !!budget;\n+\n \tnetdev_tx_completed_queue(txring_txq(tx_ring),\n \t\t\t\t  total_packets, total_bytes);\n \n@@ -2170,8 +2203,13 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,\n \n #define IXGBE_XDP_PASS 0\n #define IXGBE_XDP_CONSUMED 1\n+#define IXGBE_XDP_TX 2\n \n-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_ring  
*rx_ring,\n+static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,\n+\t\t\t       struct xdp_buff *xdp);\n+\n+static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,\n+\t\t\t\t     struct ixgbe_ring *rx_ring,\n \t\t\t\t     struct xdp_buff *xdp)\n {\n \tint result = IXGBE_XDP_PASS;\n@@ -2188,9 +2226,11 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_ring  *rx_ring,\n \tswitch (act) {\n \tcase XDP_PASS:\n \t\tbreak;\n+\tcase XDP_TX:\n+\t\tresult = ixgbe_xmit_xdp_ring(adapter, xdp);\n+\t\tbreak;\n \tdefault:\n \t\tbpf_warn_invalid_xdp_action(act);\n-\tcase XDP_TX:\n \tcase XDP_ABORTED:\n \t\ttrace_xdp_exception(rx_ring->netdev, xdp_prog, act);\n \t\t/* fallthrough -- handle aborts by dropping packet */\n@@ -2203,6 +2243,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_ring  *rx_ring,\n \treturn ERR_PTR(-result);\n }\n \n+static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,\n+\t\t\t\t struct ixgbe_rx_buffer *rx_buffer,\n+\t\t\t\t unsigned int size)\n+{\n+#if (PAGE_SIZE < 8192)\n+\tunsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;\n+\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\tunsigned int truesize = ring_uses_build_skb(rx_ring) ?\n+\t\t\t\tSKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :\n+\t\t\t\tSKB_DATA_ALIGN(size);\n+\n+\trx_buffer->page_offset += truesize;\n+#endif\n+}\n+\n /**\n  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf\n  * @q_vector: structure containing interrupt and ring information\n@@ -2221,8 +2278,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,\n \t\t\t       const int budget)\n {\n \tunsigned int total_rx_bytes = 0, total_rx_packets = 0;\n-#ifdef IXGBE_FCOE\n \tstruct ixgbe_adapter *adapter = q_vector->adapter;\n+#ifdef IXGBE_FCOE\n \tint ddp_bytes;\n \tunsigned int mss = 0;\n #endif /* IXGBE_FCOE */\n@@ -2262,13 +2319,16 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,\n \t\t\t\t\t\tixgbe_rx_offset(rx_ring);\n \t\t\txdp.data_end = xdp.data 
+ size;\n \n-\t\t\tskb = ixgbe_run_xdp(rx_ring, &xdp);\n+\t\t\tskb = ixgbe_run_xdp(adapter, rx_ring, &xdp);\n \t\t}\n \n \t\tif (IS_ERR(skb)) {\n+\t\t\tif (PTR_ERR(skb) == -IXGBE_XDP_TX)\n+\t\t\t\tixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);\n+\t\t\telse\n+\t\t\t\trx_buffer->pagecnt_bias++;\n \t\t\ttotal_rx_packets++;\n \t\t\ttotal_rx_bytes += size;\n-\t\t\trx_buffer->pagecnt_bias++;\n \t\t} else if (skb) {\n \t\t\tixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);\n \t\t} else if (ring_uses_build_skb(rx_ring)) {\n@@ -3438,6 +3498,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)\n \t/* Setup the HW Tx Head and Tail descriptor pointers */\n \tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\tixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\tixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);\n }\n \n static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,\n@@ -5579,7 +5641,10 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)\n \t\tunion ixgbe_adv_tx_desc *eop_desc, *tx_desc;\n \n \t\t/* Free all the Tx ring sk_buffs */\n-\t\tdev_kfree_skb_any(tx_buffer->skb);\n+\t\tif (ring_is_xdp(tx_ring))\n+\t\t\tpage_frag_free(tx_buffer->data);\n+\t\telse\n+\t\t\tdev_kfree_skb_any(tx_buffer->skb);\n \n \t\t/* unmap skb header data */\n \t\tdma_unmap_single(tx_ring->dev,\n@@ -5620,7 +5685,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)\n \t}\n \n \t/* reset BQL for queue */\n-\tnetdev_tx_reset_queue(txring_txq(tx_ring));\n+\tif (!ring_is_xdp(tx_ring))\n+\t\tnetdev_tx_reset_queue(txring_txq(tx_ring));\n \n \t/* reset next_to_use and next_to_clean */\n \ttx_ring->next_to_use = 0;\n@@ -5649,6 +5715,8 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)\n \n \tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\tixgbe_clean_tx_ring(adapter->tx_ring[i]);\n+\tfor (i = 0; i < adapter->num_xdp_queues; 
i++)\n+\t\tixgbe_clean_tx_ring(adapter->xdp_ring[i]);\n }\n \n static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)\n@@ -5743,6 +5811,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)\n \t\tu8 reg_idx = adapter->tx_ring[i]->reg_idx;\n \t\tIXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);\n \t}\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\tu8 reg_idx = adapter->xdp_ring[i]->reg_idx;\n+\n+\t\tIXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);\n+\t}\n \n \t/* Disable the Tx DMA engine on 82599 and later MAC */\n \tswitch (hw->mac.type) {\n@@ -6111,7 +6184,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)\n  **/\n static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)\n {\n-\tint i, err = 0;\n+\tint i, j = 0, err = 0;\n \n \tfor (i = 0; i < adapter->num_tx_queues; i++) {\n \t\terr = ixgbe_setup_tx_resources(adapter->tx_ring[i]);\n@@ -6121,10 +6194,21 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)\n \t\te_err(probe, \"Allocation for Tx Queue %u failed\\n\", i);\n \t\tgoto err_setup_tx;\n \t}\n+\tfor (j = 0; j < adapter->num_xdp_queues; j++) {\n+\t\terr = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);\n+\t\tif (!err)\n+\t\t\tcontinue;\n+\n+\t\te_err(probe, \"Allocation for Tx Queue %u failed\\n\", j);\n+\t\tgoto err_setup_tx;\n+\t}\n+\n \n \treturn 0;\n err_setup_tx:\n \t/* rewind the index freeing the rings as we go */\n+\twhile (j--)\n+\t\tixgbe_free_tx_resources(adapter->xdp_ring[j]);\n \twhile (i--)\n \t\tixgbe_free_tx_resources(adapter->tx_ring[i]);\n \treturn err;\n@@ -6255,6 +6339,9 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)\n \tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\tif (adapter->tx_ring[i]->desc)\n \t\t\tixgbe_free_tx_resources(adapter->tx_ring[i]);\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\tif (adapter->xdp_ring[i]->desc)\n+\t\t\tixgbe_free_tx_resources(adapter->xdp_ring[i]);\n }\n \n /**\n@@ 
-6674,6 +6761,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)\n \t\tbytes += tx_ring->stats.bytes;\n \t\tpackets += tx_ring->stats.packets;\n \t}\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\tstruct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];\n+\n+\t\trestart_queue += xdp_ring->tx_stats.restart_queue;\n+\t\ttx_busy += xdp_ring->tx_stats.tx_busy;\n+\t\tbytes += xdp_ring->stats.bytes;\n+\t\tpackets += xdp_ring->stats.packets;\n+\t}\n \tadapter->restart_queue = restart_queue;\n \tadapter->tx_busy = tx_busy;\n \tnetdev->stats.tx_bytes = bytes;\n@@ -6867,6 +6962,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)\n \t\tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\t\tset_bit(__IXGBE_TX_FDIR_INIT_DONE,\n \t\t\t\t&(adapter->tx_ring[i]->state));\n+\t\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\t\tset_bit(__IXGBE_TX_FDIR_INIT_DONE,\n+\t\t\t\t&adapter->xdp_ring[i]->state);\n \t\t/* re-enable flow director interrupts */\n \t\tIXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);\n \t} else {\n@@ -6900,6 +6998,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)\n \tif (netif_carrier_ok(adapter->netdev)) {\n \t\tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\t\tset_check_for_tx_hang(adapter->tx_ring[i]);\n+\t\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\t\tset_check_for_tx_hang(adapter->xdp_ring[i]);\n \t}\n \n \tif (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {\n@@ -7130,6 +7230,13 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)\n \t\t\treturn true;\n \t}\n \n+\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\tstruct ixgbe_ring *ring = adapter->xdp_ring[i];\n+\n+\t\tif (ring->next_to_use != ring->next_to_clean)\n+\t\t\treturn true;\n+\t}\n+\n \treturn false;\n }\n \n@@ -8090,6 +8197,69 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,\n #endif\n }\n \n+static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,\n+\t\t\t       struct 
xdp_buff *xdp)\n+{\n+\tstruct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];\n+\tstruct ixgbe_tx_buffer *tx_buffer;\n+\tunion ixgbe_adv_tx_desc *tx_desc;\n+\tu32 len, cmd_type;\n+\tdma_addr_t dma;\n+\tu16 i;\n+\n+\tlen = xdp->data_end - xdp->data;\n+\n+\tif (unlikely(!ixgbe_desc_unused(ring)))\n+\t\treturn IXGBE_XDP_CONSUMED;\n+\n+\tdma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);\n+\tif (dma_mapping_error(ring->dev, dma))\n+\t\treturn IXGBE_XDP_CONSUMED;\n+\n+\t/* record the location of the first descriptor for this packet */\n+\ttx_buffer = &ring->tx_buffer_info[ring->next_to_use];\n+\ttx_buffer->bytecount = len;\n+\ttx_buffer->gso_segs = 1;\n+\ttx_buffer->protocol = 0;\n+\n+\ti = ring->next_to_use;\n+\ttx_desc = IXGBE_TX_DESC(ring, i);\n+\n+\tdma_unmap_len_set(tx_buffer, len, len);\n+\tdma_unmap_addr_set(tx_buffer, dma, dma);\n+\ttx_buffer->data = xdp->data;\n+\ttx_desc->read.buffer_addr = cpu_to_le64(dma);\n+\n+\t/* put descriptor type bits */\n+\tcmd_type = IXGBE_ADVTXD_DTYP_DATA |\n+\t\t   IXGBE_ADVTXD_DCMD_DEXT |\n+\t\t   IXGBE_ADVTXD_DCMD_IFCS;\n+\tcmd_type |= len | IXGBE_TXD_CMD;\n+\ttx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);\n+\ttx_desc->read.olinfo_status =\n+\t\tcpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);\n+\n+\t/* Force memory writes to complete before letting h/w know there\n+\t * are new descriptors to fetch.  
(Only applicable for weak-ordered\n+\t * memory model archs, such as IA-64).\n+\t *\n+\t * We also need this memory barrier to make certain all of the\n+\t * status bits have been updated before next_to_watch is written.\n+\t */\n+\twmb();\n+\n+\t/* set next_to_watch value indicating a packet is present */\n+\ti++;\n+\tif (i == ring->count)\n+\t\ti = 0;\n+\n+\ttx_buffer->next_to_watch = tx_desc;\n+\tring->next_to_use = i;\n+\n+\twritel(i, ring->tail);\n+\treturn IXGBE_XDP_TX;\n+}\n+\n netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,\n \t\t\t  struct ixgbe_adapter *adapter,\n \t\t\t  struct ixgbe_ring *tx_ring)\n@@ -8381,6 +8551,23 @@ static void ixgbe_netpoll(struct net_device *netdev)\n \n #endif\n \n+static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,\n+\t\t\t\t   struct ixgbe_ring *ring)\n+{\n+\tu64 bytes, packets;\n+\tunsigned int start;\n+\n+\tif (ring) {\n+\t\tdo {\n+\t\t\tstart = u64_stats_fetch_begin_irq(&ring->syncp);\n+\t\t\tpackets = ring->stats.packets;\n+\t\t\tbytes   = ring->stats.bytes;\n+\t\t} while (u64_stats_fetch_retry_irq(&ring->syncp, start));\n+\t\tstats->tx_packets += packets;\n+\t\tstats->tx_bytes   += bytes;\n+\t}\n+}\n+\n static void ixgbe_get_stats64(struct net_device *netdev,\n \t\t\t      struct rtnl_link_stats64 *stats)\n {\n@@ -8406,18 +8593,13 @@ static void ixgbe_get_stats64(struct net_device *netdev,\n \n \tfor (i = 0; i < adapter->num_tx_queues; i++) {\n \t\tstruct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);\n-\t\tu64 bytes, packets;\n-\t\tunsigned int start;\n \n-\t\tif (ring) {\n-\t\t\tdo {\n-\t\t\t\tstart = u64_stats_fetch_begin_irq(&ring->syncp);\n-\t\t\t\tpackets = ring->stats.packets;\n-\t\t\t\tbytes   = ring->stats.bytes;\n-\t\t\t} while (u64_stats_fetch_retry_irq(&ring->syncp, start));\n-\t\t\tstats->tx_packets += packets;\n-\t\t\tstats->tx_bytes   += bytes;\n-\t\t}\n+\t\tixgbe_get_ring_stats64(stats, ring);\n+\t}\n+\tfor (i = 0; i < adapter->num_xdp_queues; i++) {\n+\t\tstruct ixgbe_ring 
*ring = ACCESS_ONCE(adapter->xdp_ring[i]);\n+\n+\t\tixgbe_get_ring_stats64(stats, ring);\n \t}\n \trcu_read_unlock();\n \n@@ -9559,7 +9741,21 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)\n \t\t\treturn -EINVAL;\n \t}\n \n+\tif (nr_cpu_ids > MAX_XDP_QUEUES)\n+\t\treturn -ENOMEM;\n+\n \told_prog = xchg(&adapter->xdp_prog, prog);\n+\n+\t/* If transitioning XDP modes reconfigure rings */\n+\tif (!!prog != !!old_prog) {\n+\t\tint err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));\n+\n+\t\tif (err) {\n+\t\t\trcu_assign_pointer(adapter->xdp_prog, old_prog);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n \tfor (i = 0; i < adapter->num_rx_queues; i++)\n \t\txchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);\n \n@@ -10070,6 +10266,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)\n \tfor (i = 0; i < adapter->num_tx_queues; i++)\n \t\tu64_stats_init(&adapter->tx_ring[i]->syncp);\n \n+\tfor (i = 0; i < adapter->num_xdp_queues; i++)\n+\t\tu64_stats_init(&adapter->xdp_ring[i]->syncp);\n+\n \t/* WOL not supported for all devices */\n \tadapter->wol = 0;\n \thw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);\n",
    "prefixes": [
        "v7",
        "2/2"
    ]
}