get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch.

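For illustration only (not part of the captured API output): a minimal Python sketch that fetches this patch with the requests library. The URL is the endpoint shown below; dropping the ?format=api suffix, which selects the browsable renderer, returns plain JSON (an assumption based on standard Django REST Framework behaviour).

import requests

# Retrieve the patch shown in the example response below.
url = "http://patchwork.ozlabs.org/api/patches/711984/"
resp = requests.get(url)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # e.g. "[next,11/11] ixgbe: Don't bother clearing ..."
print(patch["state"])  # e.g. "superseded"
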
GET /api/patches/711984/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 711984,
    "url": "http://patchwork.ozlabs.org/api/patches/711984/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170106160751.1501.83692.stgit@localhost.localdomain/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20170106160751.1501.83692.stgit@localhost.localdomain>",
    "list_archive_url": null,
    "date": "2017-01-06T16:07:53",
    "name": "[next,11/11] ixgbe: Don't bother clearing buffer memory for descriptor rings",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "cd7112c31b9ea428120ba935ba704ce5b002e4f5",
    "submitter": {
        "id": 252,
        "url": "http://patchwork.ozlabs.org/api/people/252/?format=api",
        "name": "Alexander Duyck",
        "email": "alexander.duyck@gmail.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170106160751.1501.83692.stgit@localhost.localdomain/mbox/",
    "series": [],
    "comments": "http://patchwork.ozlabs.org/api/patches/711984/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/711984/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Received": [
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3tw8bP47Ymz9sxS\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSat,  7 Jan 2017 03:08:01 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 1D00985C3F;\n\tFri,  6 Jan 2017 16:08:00 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id bGSkmcqz7jBR; Fri,  6 Jan 2017 16:07:57 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 4F69D85BC3;\n\tFri,  6 Jan 2017 16:07:57 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id 1928D1BFA57\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri,  6 Jan 2017 16:07:56 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 1150B85BC3\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri,  6 Jan 2017 16:07:56 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 1GH22ugS+039 for <intel-wired-lan@lists.osuosl.org>;\n\tFri,  6 Jan 2017 16:07:54 +0000 (UTC)",
            "from mail-pg0-f67.google.com (mail-pg0-f67.google.com\n\t[74.125.83.67])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id DAFD685AD9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri,  6 Jan 2017 16:07:54 +0000 (UTC)",
            "by mail-pg0-f67.google.com with SMTP id 75so6191874pgf.3\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tFri, 06 Jan 2017 08:07:54 -0800 (PST)",
            "from localhost.localdomain ([2001:470:b:9c3:9e5c:8eff:fe4f:f2d0])\n\tby smtp.gmail.com with ESMTPSA id\n\tq14sm161003599pfa.40.2017.01.06.08.07.53\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tFri, 06 Jan 2017 08:07:53 -0800 (PST)"
        ],
        "Authentication-Results": "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=gmail.com header.i=@gmail.com\n\theader.b=\"ssbnuepa\"; dkim-atps=neutral",
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n\th=subject:from:to:cc:date:message-id:in-reply-to:references\n\t:user-agent:mime-version:content-transfer-encoding;\n\tbh=RCt1i9N7ggKjtuVjuWAuyPO1DV/JYGQ6RgpoMw1/Zrg=;\n\tb=ssbnuepawvmWDXtl44o5s/cJsrMhjN/97kYlmqqny4eCd6hUpQePl3FunQCNY+mp7Q\n\tomHjLCgCqXWan1jWrIGYkd3V2nDueOsH4+opn6ptxkAM4C4/E6NrWGaRXr3NyOcPt7Hh\n\t4O4w39rOruXpBOjQn0zutR/sSmuYGKiOBw0d4e97rxg0P7O3N6UMKDY9FEX/ZXcPq4RD\n\tm5UtIMqcII0WxGUirGXS4KqfCQTAVrVjeO20qay6bLoD9BpnToMxj1eMqY3kcVeJshLZ\n\tCBy90Bz/uWkUgmVl4d3eN4NQIW6evR95xnEXOm1VJqsU4EOGz0lkxxlNZXQhG6gKwPNW\n\tWQPg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:subject:from:to:cc:date:message-id:in-reply-to\n\t:references:user-agent:mime-version:content-transfer-encoding;\n\tbh=RCt1i9N7ggKjtuVjuWAuyPO1DV/JYGQ6RgpoMw1/Zrg=;\n\tb=eNDDnVNyR0KKHJhmFe6VphAW8LB0VFV2tyBJJdMqvjiFp/4OIlZwtafaVVOKLDFvLC\n\t+1lwKB4M0WQcQgY5bUCKXpjdmCnvMApewTMIgMwzwVLXVxE+Psm6lleJzl8CntySX/z0\n\tOw1nXOc7fwlYYR2tWI0xjxzidE4oF09HS9t3gx6EUHJDHEPmvRJAEh2ygXPyb2joSjgU\n\tLeNBVJl4Komm8Na3GKIsAM5vJzrZxxG/grtT0zkhUtom/aLmFPvOk9IJjkWtdzFbvSJh\n\tISiSNFs7oFDdTjytjI0pprQAG2XoyQjepxlFYtWW8GkCE0yJgX5Tp/qzz1CjYhj80pLO\n\tJCRw==",
        "X-Gm-Message-State": "AIkVDXLYHQl73P7p7qCCmu8ZSEPyRqswiTrRne0rl0ZlNrE5idBl87pwtp4gz3mt3FpSqw==",
        "X-Received": "by 10.99.235.10 with SMTP id t10mr141399991pgh.95.1483718874474; \n\tFri, 06 Jan 2017 08:07:54 -0800 (PST)",
        "From": "Alexander Duyck <alexander.duyck@gmail.com>",
        "To": "intel-wired-lan@lists.osuosl.org, jeffrey.t.kirsher@intel.com",
        "Date": "Fri, 06 Jan 2017 08:07:53 -0800",
        "Message-ID": "<20170106160751.1501.83692.stgit@localhost.localdomain>",
        "In-Reply-To": "<20170106155448.1501.31298.stgit@localhost.localdomain>",
        "References": "<20170106155448.1501.31298.stgit@localhost.localdomain>",
        "User-Agent": "StGit/0.17.1-dirty",
        "MIME-Version": "1.0",
        "Cc": "netdev@vger.kernel.org",
        "Subject": "[Intel-wired-lan] [next PATCH 11/11] ixgbe: Don't bother clearing\n\tbuffer memory for descriptor rings",
        "X-BeenThere": "intel-wired-lan@lists.osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>",
        "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>",
        "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>"
    },
    "content": "From: Alexander Duyck <alexander.h.duyck@intel.com>\n\nThis patch makes it so that we don't need to bother with clearing the\nmemory out for the descriptor rings.  The general idea is to only free\nbuffers associated with buffers in use which are located between the\nnext_to_clean and next_to_use or next_to_alloc values.  Everything outside\nof those regions can be safely ignored since they should have no buffers\nassociated with them.\n\nThe advantage to doing things this way is that is should speed up bring-up\nand tear-down of the rings.  Specifically we can avoid the 512 or more\ncycles required to memset the rings in tear-down.  In the bring-up phase we\nthen clear the memory as a part of initialization.  The general idea is\nthat the clearing in initialization can act as a prefetch of sorts for the\nbuffer info structures so they are in the local CPU when we go to populate\nthem.  This should help to improve overall time needed to perform a\nsuspend/resume.\n\nSigned-off-by: Alexander Duyck <alexander.h.duyck@intel.com>\n---\n drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |   11 +-\n drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    |  158 ++++++++++++----------\n 2 files changed, 98 insertions(+), 71 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c\nindex 6466d828bc6f..8c6f40d4db6f 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c\n@@ -1945,7 +1945,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,\n \n \t\t/* unmap buffer on Tx side */\n \t\ttx_buffer = &tx_ring->tx_buffer_info[tx_ntc];\n-\t\tixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);\n+\n+\t\t/* Free all the Tx ring sk_buffs */\n+\t\tdev_kfree_skb_any(tx_buffer->skb);\n+\n+\t\t/* unmap skb header data */\n+\t\tdma_unmap_single(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t DMA_TO_DEVICE);\n+\t\tdma_unmap_len_set(tx_buffer, len, 0);\n \n \t\t/* increment Rx/Tx next to clean counters */\n \t\trx_ntc++;\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\nindex dd04b76ae0bd..6ec0ebf7f174 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n@@ -958,28 +958,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,\n \t}\n }\n \n-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,\n-\t\t\t\t      struct ixgbe_tx_buffer *tx_buffer)\n-{\n-\tif (tx_buffer->skb) {\n-\t\tdev_kfree_skb_any(tx_buffer->skb);\n-\t\tif (dma_unmap_len(tx_buffer, len))\n-\t\t\tdma_unmap_single(ring->dev,\n-\t\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n-\t\t\t\t\t dma_unmap_len(tx_buffer, len),\n-\t\t\t\t\t DMA_TO_DEVICE);\n-\t} else if (dma_unmap_len(tx_buffer, len)) {\n-\t\tdma_unmap_page(ring->dev,\n-\t\t\t       dma_unmap_addr(tx_buffer, dma),\n-\t\t\t       dma_unmap_len(tx_buffer, len),\n-\t\t\t       DMA_TO_DEVICE);\n-\t}\n-\ttx_buffer->next_to_watch = NULL;\n-\ttx_buffer->skb = NULL;\n-\tdma_unmap_len_set(tx_buffer, len, 0);\n-\t/* tx_buffer must be completely set up in the transmit path */\n-}\n-\n static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)\n {\n \tstruct ixgbe_hw *hw = &adapter->hw;\n@@ -1211,7 +1189,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,\n \t\t\t\t DMA_TO_DEVICE);\n \n \t\t/* clear tx_buffer data */\n-\t\ttx_buffer->skb = NULL;\n \t\tdma_unmap_len_set(tx_buffer, len, 0);\n \n \t\t/* unmap remaining buffers */\n@@ -3345,6 +3322,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,\n \n \tclear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);\n \n+\t/* reinitialize tx_buffer_info */\n+\tmemset(ring->tx_buffer_info, 0,\n+\t       sizeof(struct ixgbe_tx_buffer) * ring->count);\n+\n \t/* enable queue */\n \tIXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);\n \n@@ -3865,6 +3846,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,\n #endif\n \t}\n \n+\t/* initialize rx_buffer_info */\n+\tmemset(ring->rx_buffer_info, 0,\n+\t       sizeof(struct ixgbe_rx_buffer) * ring->count);\n+\n \t/* initialize Rx descriptor 0 */\n \trx_desc = IXGBE_RX_DESC(ring, 0);\n \trx_desc->wb.upper.length = 0;\n@@ -5049,33 +5034,22 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)\n  **/\n static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)\n {\n-\tstruct device *dev = rx_ring->dev;\n-\tunsigned long size;\n-\tu16 i;\n-\n-\t/* ring already cleared, nothing to do */\n-\tif (!rx_ring->rx_buffer_info)\n-\t\treturn;\n+\tu16 i = rx_ring->next_to_clean;\n+\tstruct ixgbe_rx_buffer *rx_buffer = 
&rx_ring->rx_buffer_info[i];\n \n \t/* Free all the Rx ring sk_buffs */\n-\tfor (i = 0; i < rx_ring->count; i++) {\n-\t\tstruct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];\n-\n+\twhile (i != rx_ring->next_to_alloc) {\n \t\tif (rx_buffer->skb) {\n \t\t\tstruct sk_buff *skb = rx_buffer->skb;\n \t\t\tif (IXGBE_CB(skb)->page_released)\n-\t\t\t\tdma_unmap_page_attrs(dev,\n+\t\t\t\tdma_unmap_page_attrs(rx_ring->dev,\n \t\t\t\t\t\t     IXGBE_CB(skb)->dma,\n \t\t\t\t\t\t     ixgbe_rx_pg_size(rx_ring),\n \t\t\t\t\t\t     DMA_FROM_DEVICE,\n \t\t\t\t\t\t     IXGBE_RX_DMA_ATTR);\n \t\t\tdev_kfree_skb(skb);\n-\t\t\trx_buffer->skb = NULL;\n \t\t}\n \n-\t\tif (!rx_buffer->page)\n-\t\t\tcontinue;\n-\n \t\t/* Invalidate cache lines that may have been written to by\n \t\t * device so that we avoid corrupting memory.\n \t\t */\n@@ -5086,19 +5060,21 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)\n \t\t\t\t\t      DMA_FROM_DEVICE);\n \n \t\t/* free resources associated with mapping */\n-\t\tdma_unmap_page_attrs(dev, rx_buffer->dma,\n+\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n \t\t\t\t     ixgbe_rx_pg_size(rx_ring),\n \t\t\t\t     DMA_FROM_DEVICE,\n \t\t\t\t     IXGBE_RX_DMA_ATTR);\n \t\t__page_frag_cache_drain(rx_buffer->page,\n \t\t\t\t\trx_buffer->pagecnt_bias);\n \n-\t\trx_buffer->page = NULL;\n+\t\ti++;\n+\t\trx_buffer++;\n+\t\tif (i == rx_ring->count) {\n+\t\t\ti = 0;\n+\t\t\trx_buffer = rx_ring->rx_buffer_info;\n+\t\t}\n \t}\n \n-\tsize = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;\n-\tmemset(rx_ring->rx_buffer_info, 0, size);\n-\n \trx_ring->next_to_alloc = 0;\n \trx_ring->next_to_clean = 0;\n \trx_ring->next_to_use = 0;\n@@ -5567,28 +5543,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)\n  **/\n static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)\n {\n-\tstruct ixgbe_tx_buffer *tx_buffer_info;\n-\tunsigned long size;\n-\tu16 i;\n+\tu16 i = tx_ring->next_to_clean;\n+\tstruct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];\n \n-\t/* ring already cleared, nothing to do */\n-\tif (!tx_ring->tx_buffer_info)\n-\t\treturn;\n+\twhile (i != tx_ring->next_to_use) {\n+\t\tunion ixgbe_adv_tx_desc *eop_desc, *tx_desc;\n \n-\t/* Free all the Tx ring sk_buffs */\n-\tfor (i = 0; i < tx_ring->count; i++) {\n-\t\ttx_buffer_info = &tx_ring->tx_buffer_info[i];\n-\t\tixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);\n-\t}\n+\t\t/* Free all the Tx ring sk_buffs */\n+\t\tdev_kfree_skb_any(tx_buffer->skb);\n \n-\tnetdev_tx_reset_queue(txring_txq(tx_ring));\n+\t\t/* unmap skb header data */\n+\t\tdma_unmap_single(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t DMA_TO_DEVICE);\n \n-\tsize = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;\n-\tmemset(tx_ring->tx_buffer_info, 0, size);\n+\t\t/* check for eop_desc to determine the end of the packet */\n+\t\teop_desc = tx_buffer->next_to_watch;\n+\t\ttx_desc = IXGBE_TX_DESC(tx_ring, i);\n \n-\t/* Zero out the descriptor ring */\n-\tmemset(tx_ring->desc, 0, tx_ring->size);\n+\t\t/* unmap remaining buffers */\n+\t\twhile (tx_desc != eop_desc) {\n+\t\t\ttx_buffer++;\n+\t\t\ttx_desc++;\n+\t\t\ti++;\n+\t\t\tif (unlikely(i == tx_ring->count)) {\n+\t\t\t\ti = 0;\n+\t\t\t\ttx_buffer = tx_ring->tx_buffer_info;\n+\t\t\t\ttx_desc = IXGBE_TX_DESC(tx_ring, 0);\n+\t\t\t}\n+\n+\t\t\t/* unmap any remaining paged data */\n+\t\t\tif (dma_unmap_len(tx_buffer, len))\n+\t\t\t\tdma_unmap_page(tx_ring->dev,\n+\t\t\t\t\t       dma_unmap_addr(tx_buffer, 
dma),\n+\t\t\t\t\t       dma_unmap_len(tx_buffer, len),\n+\t\t\t\t\t       DMA_TO_DEVICE);\n+\t\t}\n \n+\t\t/* move us one more past the eop_desc for start of next pkt */\n+\t\ttx_buffer++;\n+\t\ti++;\n+\t\tif (unlikely(i == tx_ring->count)) {\n+\t\t\ti = 0;\n+\t\t\ttx_buffer = tx_ring->tx_buffer_info;\n+\t\t}\n+\t}\n+\n+\t/* reset BQL for queue */\n+\tnetdev_tx_reset_queue(txring_txq(tx_ring));\n+\n+\t/* reset next_to_use and next_to_clean */\n \ttx_ring->next_to_use = 0;\n \ttx_ring->next_to_clean = 0;\n }\n@@ -6034,9 +6039,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)\n \tif (tx_ring->q_vector)\n \t\tring_node = tx_ring->q_vector->numa_node;\n \n-\ttx_ring->tx_buffer_info = vzalloc_node(size, ring_node);\n+\ttx_ring->tx_buffer_info = vmalloc_node(size, ring_node);\n \tif (!tx_ring->tx_buffer_info)\n-\t\ttx_ring->tx_buffer_info = vzalloc(size);\n+\t\ttx_ring->tx_buffer_info = vmalloc(size);\n \tif (!tx_ring->tx_buffer_info)\n \t\tgoto err;\n \n@@ -6116,9 +6121,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)\n \tif (rx_ring->q_vector)\n \t\tring_node = rx_ring->q_vector->numa_node;\n \n-\trx_ring->rx_buffer_info = vzalloc_node(size, ring_node);\n+\trx_ring->rx_buffer_info = vmalloc_node(size, ring_node);\n \tif (!rx_ring->rx_buffer_info)\n-\t\trx_ring->rx_buffer_info = vzalloc(size);\n+\t\trx_ring->rx_buffer_info = vmalloc(size);\n \tif (!rx_ring->rx_buffer_info)\n \t\tgoto err;\n \n@@ -7836,16 +7841,29 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,\n \tdev_err(tx_ring->dev, \"TX DMA map failed\\n\");\n \n \t/* clear dma mappings for failed tx_buffer_info map */\n-\tfor (;;) {\n+\twhile (tx_buffer != first) {\n+\t\tif (dma_unmap_len(tx_buffer, len))\n+\t\t\tdma_unmap_page(tx_ring->dev,\n+\t\t\t\t       dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t       dma_unmap_len(tx_buffer, len),\n+\t\t\t\t       DMA_TO_DEVICE);\n+\t\tdma_unmap_len_set(tx_buffer, len, 0);\n+\n+\t\tif (i--)\n+\t\t\ti += tx_ring->count;\n \t\ttx_buffer = &tx_ring->tx_buffer_info[i];\n-\t\tixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);\n-\t\tif (tx_buffer == first)\n-\t\t\tbreak;\n-\t\tif (i == 0)\n-\t\t\ti = tx_ring->count;\n-\t\ti--;\n \t}\n \n+\tif (dma_unmap_len(tx_buffer, len))\n+\t\tdma_unmap_single(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t DMA_TO_DEVICE);\n+\tdma_unmap_len_set(tx_buffer, len, 0);\n+\n+\tdev_kfree_skb_any(first->skb);\n+\tfirst->skb = NULL;\n+\n \ttx_ring->next_to_use = i;\n }\n \n",
    "prefixes": [
        "next",
        "11/11"
    ]
}
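
As a hedged usage sketch (not part of the API output above): the Allow header lists PUT and PATCH, so writable fields such as "state" or "archived" can be changed with an authenticated request. TOKEN below is a placeholder for a real Patchwork API token, and the assumption is that the authenticated account has maintainer rights on the project.

import requests

# Partially update the patch via PATCH; only the supplied fields change.
TOKEN = "0123456789abcdef"  # hypothetical token, replace with a real one
url = "http://patchwork.ozlabs.org/api/patches/711984/"

resp = requests.patch(
    url,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])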