get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update of its writable fields).
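
A minimal write sketch, assuming Python's requests library and token authentication; the host, token value, and the field values below are illustrative placeholders, not values taken from the example response further down.

    import requests

    # Illustrative placeholders -- substitute a real Patchwork instance and API token.
    BASE_URL = "https://patchwork.example.org/api"
    TOKEN = "0123456789abcdef"
    PATCH_ID = 738800

    # PATCH applies a partial update: only the listed fields are modified.
    resp = requests.patch(
        f"{BASE_URL}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()
    print(resp.json()["state"])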

GET /api/patches/738800/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 738800,
    "url": "http://patchwork.ozlabs.org/api/patches/738800/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/1489511727-10959-3-git-send-email-bimmy.pujari@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<1489511727-10959-3-git-send-email-bimmy.pujari@intel.com>",
    "list_archive_url": null,
    "date": "2017-03-14T17:15:24",
    "name": "[next,S63,3/6] i40e/i40evf: Pull out code for cleaning up Rx buffers",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "5a5bab4627bad1a628a0af9accaa7ff386355705",
    "submitter": {
        "id": 68919,
        "url": "http://patchwork.ozlabs.org/api/people/68919/?format=api",
        "name": "Pujari, Bimmy",
        "email": "bimmy.pujari@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/1489511727-10959-3-git-send-email-bimmy.pujari@intel.com/mbox/",
    "series": [],
    "comments": "http://patchwork.ozlabs.org/api/patches/738800/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/738800/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Received": [
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3vjKdr6V4lz9s2Q\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 15 Mar 2017 03:17:52 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 6849A89657;\n\tTue, 14 Mar 2017 16:17:51 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 12SUeRVEce6v; Tue, 14 Mar 2017 16:17:44 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 03F5089670;\n\tTue, 14 Mar 2017 16:17:41 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id AA6CA1BFEBB\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 14 Mar 2017 16:17:39 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id A556489675\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 14 Mar 2017 16:17:39 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 9YUWD9iNoX5F for <intel-wired-lan@lists.osuosl.org>;\n\tTue, 14 Mar 2017 16:17:36 +0000 (UTC)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 7321589657\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 14 Mar 2017 16:17:36 +0000 (UTC)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t14 Mar 2017 09:17:35 -0700",
            "from bimmy.jf.intel.com (HELO bimmy.linux1.jf.intel.com)\n\t([10.166.35.87])\n\tby orsmga001.jf.intel.com with ESMTP; 14 Mar 2017 09:17:34 -0700"
        ],
        "Authentication-Results": "ozlabs.org;\n\tdkim=fail reason=\"key not found in DNS\" (0-bit key;\n\tunprotected) header.d=intel.com header.i=@intel.com\n\theader.b=\"sDvZVgYI\"; dkim-atps=neutral",
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=simple/simple;\n\td=intel.com; i=@intel.com; q=dns/txt; s=intel;\n\tt=1489508256; x=1521044256;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=UOzRzqkH+coV559KtCPZ9hM7WEQLw43cWfkHg2twhNs=;\n\tb=sDvZVgYIWjjgC9yHrR30U8rNQXIituQRgrCz/z1cFMyEmD60sLmH9hcq\n\tefepWG0v8VHhCY0NJX3aufnSF8/bUg==;",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos; i=\"5.36,164,1486454400\"; d=\"scan'208\";\n\ta=\"1108362398\"",
        "From": "Bimmy Pujari <bimmy.pujari@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Tue, 14 Mar 2017 10:15:24 -0700",
        "Message-Id": "<1489511727-10959-3-git-send-email-bimmy.pujari@intel.com>",
        "X-Mailer": "git-send-email 2.4.11",
        "In-Reply-To": "<1489511727-10959-1-git-send-email-bimmy.pujari@intel.com>",
        "References": "<1489511727-10959-1-git-send-email-bimmy.pujari@intel.com>",
        "Subject": "[Intel-wired-lan] [next PATCH S63 3/6] i40e/i40evf: Pull out code\n\tfor cleaning up Rx buffers",
        "X-BeenThere": "intel-wired-lan@lists.osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>",
        "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>",
        "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>"
    },
    "content": "From: Alexander Duyck <alexander.h.duyck@intel.com>\n\nThis patch pulls out the code responsible for handling buffer recycling and\npage counting and distributes it through several functions.  This allows us\nto commonize the bits that handle either freeing or recycling the buffers.\n\nAs far as the page count tracking one change to the logic is that\npagecnt_bias is decremented as soon as we call i40e_get_rx_buffer.  It is\nthen the responsibility of the function that pulls the data to either\nincrement the pagecnt_bias if the buffer can be recycled as-is, or to\nupdate page_offset so that we are pointing at the correct location for\nplacement of the next buffer.\n\nSigned-off-by: Alexander Duyck <alexander.h.duyck@intel.com>\nChange-ID: Ibac576360cb7f0b1627f2a993d13c1a8a2bf60af\n---\nTesting Hints:\n        The greatest risk with this patch is a memory leak of some sort.\n        I already caught one spot where I hadn't fully thought things out\n        in regards to the path where we don't support bulk page updates.\n        My advice would be to test on a RHEL 6.X kernel as well as a RHEL\n        7.X kernel as the 6.X won't support bulk page count updates while\n        the 7.3 and later kernels do.\n\n drivers/net/ethernet/intel/i40e/i40e_txrx.c   | 73 +++++++++++++++++----------\n drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 72 ++++++++++++++++----------\n 2 files changed, 89 insertions(+), 56 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\nindex d1fc0f0..d7c4e1e 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n@@ -1294,6 +1294,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \tbi->dma = dma;\n \tbi->page = page;\n \tbi->page_offset = 0;\n+\n+\t/* initialize pagecnt_bias to 1 representing we fully own page */\n \tbi->pagecnt_bias = 1;\n \n \treturn true;\n@@ -1622,8 +1624,6 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  * the adapter for another receive\n  *\n  * @rx_buffer: buffer containing the page\n- * @page: page address from rx_buffer\n- * @truesize: actual size of the buffer in this page\n  *\n  * If page is reusable, rx_buffer->page_offset is adjusted to point to\n  * an unused region in the page.\n@@ -1646,14 +1646,13 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  *\n  * In either case, if the page is reusable its refcount is increased.\n  **/\n-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n-\t\t\t\t   struct page *page,\n-\t\t\t\t   const unsigned int truesize)\n+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n {\n #if (PAGE_SIZE >= 8192)\n \tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n #endif\n-\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;\n+\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n+\tstruct page *page = rx_buffer->page;\n \n \t/* Is any reuse possible? */\n \tif (unlikely(!i40e_page_is_reusable(page)))\n@@ -1661,15 +1660,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n \n #if (PAGE_SIZE < 8192)\n \t/* if we are only owner of page we can reuse it */\n-\tif (unlikely(page_count(page) != pagecnt_bias))\n+\tif (unlikely((page_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n-\n-\t/* flip page offset to other buffer */\n-\trx_buffer->page_offset ^= truesize;\n #else\n-\t/* move offset up to the next cache line */\n-\trx_buffer->page_offset += truesize;\n-\n \tif (rx_buffer->page_offset > last_offset)\n \t\treturn false;\n #endif\n@@ -1678,10 +1671,11 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n \t * the pagecnt_bias and page count so that we fully restock the\n \t * number of references the driver holds.\n \t */\n-\tif (unlikely(pagecnt_bias == 1)) {\n+\tif (unlikely(!pagecnt_bias)) {\n \t\tpage_ref_add(page, USHRT_MAX);\n \t\trx_buffer->pagecnt_bias = USHRT_MAX;\n \t}\n+\n \treturn true;\n }\n \n@@ -1689,8 +1683,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @rx_buffer: buffer containing page to add\n- * @size: packet length from rx_desc\n  * @skb: sk_buff to place the data into\n+ * @size: packet length from rx_desc\n  *\n  * This function will add the data contained in rx_buffer->page to the skb.\n  * This is done either through a direct copy if the data in the buffer is\n@@ -1700,10 +1694,10 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n  * The function will then update the page offset if necessary and return\n  * true if the buffer can be reused by the adapter.\n  **/\n-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\t\t     struct i40e_rx_buffer *rx_buffer,\n-\t\t\t     unsigned int 
size,\n-\t\t\t     struct sk_buff *skb)\n+\t\t\t     struct sk_buff *skb,\n+\t\t\t     unsigned int size)\n {\n \tstruct page *page = rx_buffer->page;\n \tunsigned char *va = page_address(page) + rx_buffer->page_offset;\n@@ -1723,12 +1717,11 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \tif (size <= I40E_RX_HDR_SIZE) {\n \t\tmemcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));\n \n-\t\t/* page is reusable, we can reuse buffer as-is */\n-\t\tif (likely(i40e_page_is_reusable(page)))\n-\t\t\treturn true;\n-\n-\t\t/* this page cannot be reused so discard it */\n-\t\treturn false;\n+\t\t/* page is to be freed, increase pagecnt_bias instead of\n+\t\t * decreasing page count.\n+\t\t */\n+\t\trx_buffer->pagecnt_bias++;\n+\t\treturn;\n \t}\n \n \t/* we need the header to contain the greater of either\n@@ -1750,7 +1743,12 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n \t\t\t(unsigned long)va & ~PAGE_MASK, size, truesize);\n \n-\treturn i40e_can_reuse_rx_page(rx_buffer, page, truesize);\n+\t/* page is being used so we must update the page offset */\n+#if (PAGE_SIZE < 8192)\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\trx_buffer->page_offset += truesize;\n+#endif\n }\n \n /**\n@@ -1776,6 +1774,9 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,\n \t\t\t\t      size,\n \t\t\t\t      DMA_FROM_DEVICE);\n \n+\t/* We have pulled a buffer for use, so decrement pagecnt_bias */\n+\trx_buffer->pagecnt_bias--;\n+\n \treturn rx_buffer;\n }\n \n@@ -1812,12 +1813,29 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,\n \t\t\t\t       GFP_ATOMIC | __GFP_NOWARN);\n \t\tif (unlikely(!skb)) {\n \t\t\trx_ring->rx_stats.alloc_buff_failed++;\n+\t\t\trx_buffer->pagecnt_bias++;\n \t\t\treturn NULL;\n \t\t}\n \t}\n \n \t/* pull page into skb */\n-\tif (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {\n+\ti40e_add_rx_frag(rx_ring, rx_buffer, skb, size);\n+\n+\treturn skb;\n+}\n+\n+/**\n+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free\n+ * @rx_ring: rx descriptor ring to transact packets on\n+ * @rx_buffer: rx buffer to pull data from\n+ *\n+ * This function will clean up the contents of the rx_buffer.  
It will\n+ * either recycle the bufer or unmap it and free the associated resources.\n+ */\n+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n+\t\t\t       struct i40e_rx_buffer *rx_buffer)\n+{\n+\tif (i40e_can_reuse_rx_page(rx_buffer)) {\n \t\t/* hand second half of page back to the ring */\n \t\ti40e_reuse_rx_page(rx_ring, rx_buffer);\n \t\trx_ring->rx_stats.page_reuse_count++;\n@@ -1831,8 +1849,6 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,\n \n \t/* clear contents of buffer_info */\n \trx_buffer->page = NULL;\n-\n-\treturn skb;\n }\n \n /**\n@@ -1932,6 +1948,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \t\tif (!skb)\n \t\t\tbreak;\n \n+\t\ti40e_put_rx_buffer(rx_ring, rx_buffer);\n \t\tcleaned_count++;\n \n \t\tif (i40e_is_non_eop(rx_ring, rx_desc, skb))\ndiff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\nindex 2320ec4..06b3779 100644\n--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n@@ -662,6 +662,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \tbi->dma = dma;\n \tbi->page = page;\n \tbi->page_offset = 0;\n+\n+\t/* initialize pagecnt_bias to 1 representing we fully own page */\n \tbi->pagecnt_bias = 1;\n \n \treturn true;\n@@ -980,8 +982,6 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  * the adapter for another receive\n  *\n  * @rx_buffer: buffer containing the page\n- * @page: page address from rx_buffer\n- * @truesize: actual size of the buffer in this page\n  *\n  * If page is reusable, rx_buffer->page_offset is adjusted to point to\n  * an unused region in the page.\n@@ -1004,14 +1004,13 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  *\n  * In either case, if the page is reusable its refcount is increased.\n  **/\n-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n-\t\t\t\t   struct page *page,\n-\t\t\t\t   const unsigned int truesize)\n+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n {\n #if (PAGE_SIZE >= 8192)\n \tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n #endif\n-\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;\n+\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n+\tstruct page *page = rx_buffer->page;\n \n \t/* Is any reuse possible? 
*/\n \tif (unlikely(!i40e_page_is_reusable(page)))\n@@ -1019,15 +1018,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n \n #if (PAGE_SIZE < 8192)\n \t/* if we are only owner of page we can reuse it */\n-\tif (unlikely(page_count(page) != pagecnt_bias))\n+\tif (unlikely((page_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n-\n-\t/* flip page offset to other buffer */\n-\trx_buffer->page_offset ^= truesize;\n #else\n-\t/* move offset up to the next cache line */\n-\trx_buffer->page_offset += truesize;\n-\n \tif (rx_buffer->page_offset > last_offset)\n \t\treturn false;\n #endif\n@@ -1036,7 +1029,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n \t * the pagecnt_bias and page count so that we fully restock the\n \t * number of references the driver holds.\n \t */\n-\tif (unlikely(pagecnt_bias == 1)) {\n+\tif (unlikely(!pagecnt_bias)) {\n \t\tpage_ref_add(page, USHRT_MAX);\n \t\trx_buffer->pagecnt_bias = USHRT_MAX;\n \t}\n@@ -1048,8 +1041,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff\n  * @rx_ring: rx descriptor ring to transact packets on\n  * @rx_buffer: buffer containing page to add\n- * @size: packet length from rx_desc\n  * @skb: sk_buff to place the data into\n+ * @size: packet length from rx_desc\n  *\n  * This function will add the data contained in rx_buffer->page to the skb.\n  * This is done either through a direct copy if the data in the buffer is\n@@ -1059,10 +1052,10 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n  * The function will then update the page offset if necessary and return\n  * true if the buffer can be reused by the adapter.\n  **/\n-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\t\t     struct i40e_rx_buffer *rx_buffer,\n-\t\t\t     unsigned int size,\n-\t\t\t     struct sk_buff *skb)\n+\t\t\t     struct sk_buff *skb,\n+\t\t\t     unsigned int size)\n {\n \tstruct page *page = rx_buffer->page;\n \tunsigned char *va = page_address(page) + rx_buffer->page_offset;\n@@ -1082,12 +1075,11 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \tif (size <= I40E_RX_HDR_SIZE) {\n \t\tmemcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));\n \n-\t\t/* page is reusable, we can reuse buffer as-is */\n-\t\tif (likely(i40e_page_is_reusable(page)))\n-\t\t\treturn true;\n-\n-\t\t/* this page cannot be reused so discard it */\n-\t\treturn false;\n+\t\t/* page is to be freed, increase pagecnt_bias instead of\n+\t\t * decreasing page count.\n+\t\t */\n+\t\trx_buffer->pagecnt_bias++;\n+\t\treturn;\n \t}\n \n \t/* we need the header to contain the greater of either\n@@ -1109,7 +1101,12 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n \t\t\t(unsigned long)va & ~PAGE_MASK, size, truesize);\n \n-\treturn i40e_can_reuse_rx_page(rx_buffer, page, truesize);\n+\t/* page is being used so we must update the page offset */\n+#if (PAGE_SIZE < 8192)\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\trx_buffer->page_offset += truesize;\n+#endif\n }\n \n /**\n@@ -1135,6 +1132,9 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,\n \t\t\t\t      size,\n \t\t\t\t      DMA_FROM_DEVICE);\n \n+\t/* We have pulled a buffer for use, so decrement pagecnt_bias */\n+\trx_buffer->pagecnt_bias--;\n+\n \treturn rx_buffer;\n }\n \n@@ -1171,12 +1171,29 @@ struct 
sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,\n \t\t\t\t       GFP_ATOMIC | __GFP_NOWARN);\n \t\tif (unlikely(!skb)) {\n \t\t\trx_ring->rx_stats.alloc_buff_failed++;\n+\t\t\trx_buffer->pagecnt_bias++;\n \t\t\treturn NULL;\n \t\t}\n \t}\n \n \t/* pull page into skb */\n-\tif (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {\n+\ti40e_add_rx_frag(rx_ring, rx_buffer, skb, size);\n+\n+\treturn skb;\n+}\n+\n+/**\n+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free\n+ * @rx_ring: rx descriptor ring to transact packets on\n+ * @rx_buffer: rx buffer to pull data from\n+ *\n+ * This function will clean up the contents of the rx_buffer.  It will\n+ * either recycle the bufer or unmap it and free the associated resources.\n+ */\n+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n+\t\t\t       struct i40e_rx_buffer *rx_buffer)\n+{\n+\tif (i40e_can_reuse_rx_page(rx_buffer)) {\n \t\t/* hand second half of page back to the ring */\n \t\ti40e_reuse_rx_page(rx_ring, rx_buffer);\n \t\trx_ring->rx_stats.page_reuse_count++;\n@@ -1190,8 +1207,6 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,\n \n \t/* clear contents of buffer_info */\n \trx_buffer->page = NULL;\n-\n-\treturn skb;\n }\n \n /**\n@@ -1286,6 +1301,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)\n \t\tif (!skb)\n \t\t\tbreak;\n \n+\t\ti40e_put_rx_buffer(rx_ring, rx_buffer);\n \t\tcleaned_count++;\n \n \t\tif (i40e_is_non_eop(rx_ring, rx_desc, skb))\n",
    "prefixes": [
        "next",
        "S63",
        "3/6"
    ]
}
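
As a read-side usage sketch (assuming the public patchwork.ozlabs.org instance shown above is reachable and the patch ID still exists), the same record can be fetched anonymously and its raw mbox downloaded by following the links in the response:

    import requests

    # GET requires no authentication on a public Patchwork instance.
    resp = requests.get("http://patchwork.ozlabs.org/api/patches/738800/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # The "mbox" field links to the raw patch email, suitable for `git am`.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("738800.mbox", "wb") as f:
        f.write(mbox.content)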