Patch Detail
get: Show a patch.
patch: Update a patch (partial update).
put: Update a patch (full update).
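Any HTTP client can drive these endpoints; the read-only GET needs no authentication. Below is a minimal sketch using Python's requests library (an assumed third-party dependency) that fetches the patch shown in the request/response pair that follows; the fields printed all appear in that response.

import requests

# Fetch a single patch from the Patchwork REST API (no auth needed for GET).
resp = requests.get("http://patchwork.ozlabs.org/api/patches/867757/")
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[8/9] ixgbevf: break out Rx buffer page management"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # URL of the raw mbox, suitable for applying with git am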
GET /api/patches/867757/?format=api
{ "id": 867757, "url": "http://patchwork.ozlabs.org/api/patches/867757/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180131005148.19264.33449.stgit@localhost6.localdomain6/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20180131005148.19264.33449.stgit@localhost6.localdomain6>", "list_archive_url": null, "date": "2018-01-31T00:51:49", "name": "[8/9] ixgbevf: break out Rx buffer page management", "commit_ref": null, "pull_url": null, "state": "accepted", "archived": false, "hash": "24c29f953300f0f76e3da6ce03bf67e3bad0fe95", "submitter": { "id": 1670, "url": "http://patchwork.ozlabs.org/api/people/1670/?format=api", "name": "Tantilov, Emil S", "email": "emil.s.tantilov@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180131005148.19264.33449.stgit@localhost6.localdomain6/mbox/", "series": [ { "id": 26180, "url": "http://patchwork.ozlabs.org/api/series/26180/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=26180", "date": "2018-01-31T00:51:07", "name": "ixgbevf: build_skb support and related changes", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/26180/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/867757/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/867757/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "Received": [ "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3zWPgR1sp6z9ryQ\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 31 Jan 2018 11:45:55 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 7E62C88EA8;\n\tWed, 31 Jan 2018 00:45:53 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id oap89Wjj2K9H; Wed, 31 Jan 2018 00:45:52 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 8E8B888E3C;\n\tWed, 31 Jan 2018 00:45:52 +0000 (UTC)", "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id 9D0E01C4377\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:51 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 994B088354\n\tfor 
<intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:51 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id kPOcLW7Wde6F for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:50 +0000 (UTC)", "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id BF4BF876E9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:50 +0000 (UTC)", "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t30 Jan 2018 16:45:50 -0800", "from estantil-desk3.jf.intel.com (HELO localhost6.localdomain6)\n\t([134.134.177.100])\n\tby fmsmga004.fm.intel.com with ESMTP; 30 Jan 2018 16:45:50 -0800" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.46,437,1511856000\"; d=\"scan'208\";a=\"26054196\"", "From": "Emil Tantilov <emil.s.tantilov@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Tue, 30 Jan 2018 16:51:49 -0800", "Message-ID": "<20180131005148.19264.33449.stgit@localhost6.localdomain6>", "In-Reply-To": "<20180131005015.19264.44085.stgit@localhost6.localdomain6>", "References": "<20180131005015.19264.44085.stgit@localhost6.localdomain6>", "User-Agent": "StGit/0.17.1-17-ge4e0", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH 8/9] ixgbevf: break out Rx buffer page\n\tmanagement", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.24", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "Based on commit e014272672b9 (\"igb: Break out Rx buffer page management\")\n\nConsolidate Rx code paths to reduce duplication when we expand them in\nthe future.\n\n\nSigned-off-by: Emil Tantilov <emil.s.tantilov@intel.com>\n---\n drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 227 +++++++++++----------\n 1 file changed, 114 insertions(+), 113 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\nindex 754efb4..cd8c0a0 100644\n--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\n+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\n@@ -130,6 +130,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)\n static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);\n static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);\n static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);\n+static bool 
ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);\n+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,\n+\t\t\t\t struct ixgbevf_rx_buffer *old_buff);\n \n static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)\n {\n@@ -527,6 +530,49 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,\n \tskb->protocol = eth_type_trans(skb, rx_ring->netdev);\n }\n \n+static\n+struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,\n+\t\t\t\t\t\tconst unsigned int size)\n+{\n+\tstruct ixgbevf_rx_buffer *rx_buffer;\n+\n+\trx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];\n+\tprefetchw(rx_buffer->page);\n+\n+\t/* we are reusing so sync this buffer for CPU use */\n+\tdma_sync_single_range_for_cpu(rx_ring->dev,\n+\t\t\t\t rx_buffer->dma,\n+\t\t\t\t rx_buffer->page_offset,\n+\t\t\t\t size,\n+\t\t\t\t DMA_FROM_DEVICE);\n+\n+\trx_buffer->pagecnt_bias--;\n+\n+\treturn rx_buffer;\n+}\n+\n+static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,\n+\t\t\t\t struct ixgbevf_rx_buffer *rx_buffer)\n+{\n+\tif (ixgbevf_can_reuse_rx_page(rx_buffer)) {\n+\t\t/* hand second half of page back to the ring */\n+\t\tixgbevf_reuse_rx_page(rx_ring, rx_buffer);\n+\t} else {\n+\t\t/* We are not reusing the buffer so unmap it and free\n+\t\t * any references we are holding to it\n+\t\t */\n+\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n+\t\t\t\t ixgbevf_rx_pg_size(rx_ring),\n+\t\t\t\t DMA_FROM_DEVICE,\n+\t\t\t\t IXGBEVF_RX_DMA_ATTR);\n+\t\t__page_frag_cache_drain(rx_buffer->page,\n+\t\t\t\t\trx_buffer->pagecnt_bias);\n+\t}\n+\n+\t/* clear contents of rx_buffer */\n+\trx_buffer->page = NULL;\n+}\n+\n /**\n * ixgbevf_is_non_eop - process handling of non-EOP buffers\n * @rx_ring: Rx ring being processed\n@@ -740,11 +786,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)\n \treturn (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);\n }\n \n-static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,\n-\t\t\t\t struct page *page,\n-\t\t\t\t const unsigned int truesize)\n+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)\n {\n-\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;\n+\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n+\tstruct page *page = rx_buffer->page;\n \n \t/* avoid re-using remote pages */\n \tif (unlikely(ixgbevf_page_is_reserved(page)))\n@@ -752,16 +797,9 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,\n \n #if (PAGE_SIZE < 8192)\n \t/* if we are only owner of page we can reuse it */\n-\tif (unlikely(page_ref_count(page) != pagecnt_bias))\n+\tif (unlikely((page_ref_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n-\n-\t/* flip page offset to other buffer */\n-\trx_buffer->page_offset ^= truesize;\n-\n #else\n-\t/* move offset up to the next cache line */\n-\trx_buffer->page_offset += truesize;\n-\n #define IXGBEVF_LAST_OFFSET \\\n \t(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)\n \n@@ -774,7 +812,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,\n \t * the pagecnt_bias and page count so that we fully restock the\n \t * number of references the driver holds.\n \t */\n-\tif (unlikely(pagecnt_bias == 1)) {\n+\tif (unlikely(!pagecnt_bias)) {\n \t\tpage_ref_add(page, USHRT_MAX);\n \t\trx_buffer->pagecnt_bias = USHRT_MAX;\n \t}\n@@ -786,25 +824,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,\n * ixgbevf_add_rx_frag - Add contents of Rx 
buffer to sk_buff\n * @rx_ring: rx descriptor ring to transact packets on\n * @rx_buffer: buffer containing page to add\n- * @rx_desc: descriptor containing length of buffer written by hardware\n * @skb: sk_buff to place the data into\n+ * @size: size of buffer to be added\n *\n * This function will add the data contained in rx_buffer->page to the skb.\n- * This is done either through a direct copy if the data in the buffer is\n- * less than the skb header size, otherwise it will just attach the page as\n- * a frag to the skb.\n- *\n- * The function will then update the page offset if necessary and return\n- * true if the buffer can be reused by the adapter.\n **/\n-static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,\n+static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,\n \t\t\t\tstruct ixgbevf_rx_buffer *rx_buffer,\n-\t\t\t\tu16 size,\n-\t\t\t\tunion ixgbe_adv_rx_desc *rx_desc,\n-\t\t\t\tstruct sk_buff *skb)\n+\t\t\t\tstruct sk_buff *skb,\n+\t\t\t\tunsigned int size)\n {\n-\tstruct page *page = rx_buffer->page;\n-\tvoid *va = page_address(page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n \tunsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;\n #else\n@@ -812,102 +841,64 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,\n \t\t\t\tSKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :\n \t\t\t\tSKB_DATA_ALIGN(size);\n #endif\n-\tunsigned int pull_len;\n-\n-\tif (unlikely(skb_is_nonlinear(skb)))\n-\t\tgoto add_tail_frag;\n-\n-\tif (likely(size <= IXGBEVF_RX_HDR_SIZE)) {\n-\t\tmemcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));\n-\n-\t\t/* page is not reserved, we can reuse buffer as is */\n-\t\tif (likely(!ixgbevf_page_is_reserved(page)))\n-\t\t\treturn true;\n-\n-\t\t/* this page cannot be reused so discard it */\n-\t\treturn false;\n-\t}\n-\n-\t/* we need the header to contain the greater of either ETH_HLEN or\n-\t * 60 bytes if the skb->len is less than 60 for skb_pad.\n-\t */\n-\tpull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);\n-\n-\t/* align pull length to size of long to optimize memcpy performance */\n-\tmemcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));\n-\n-\t/* update all of the pointers */\n-\tva += pull_len;\n-\tsize -= pull_len;\n-\n-add_tail_frag:\n-\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n-\t\t\tva - page_address(page), size, truesize);\n-\n-\treturn ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);\n+\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,\n+\t\t\trx_buffer->page_offset, size, truesize);\n+#if (PAGE_SIZE < 8192)\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\trx_buffer->page_offset += truesize;\n+#endif\n }\n \n-static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,\n-\t\t\t\t\t union ixgbe_adv_rx_desc *rx_desc,\n-\t\t\t\t\t struct sk_buff *skb)\n+static\n+struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,\n+\t\t\t\t struct ixgbevf_rx_buffer *rx_buffer,\n+\t\t\t\t union ixgbe_adv_rx_desc *rx_desc,\n+\t\t\t\t unsigned int size)\n {\n-\tstruct ixgbevf_rx_buffer *rx_buffer;\n-\tstruct page *page;\n-\tu16 size = le16_to_cpu(rx_desc->wb.upper.length);\n-\n-\trx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];\n-\tpage = rx_buffer->page;\n-\tprefetchw(page);\n-\n-\t/* we are reusing so sync this buffer for CPU use */\n-\tdma_sync_single_range_for_cpu(rx_ring->dev,\n-\t\t\t\t rx_buffer->dma,\n-\t\t\t\t rx_buffer->page_offset,\n-\t\t\t\t size,\n-\t\t\t\t DMA_FROM_DEVICE);\n-\n-\tif (likely(!skb)) {\n-\t\tvoid *va = 
page_address(page) + rx_buffer->page_offset;\n+\tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n+#if (PAGE_SIZE < 8192)\n+\tunsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;\n+#else\n+\tunsigned int truesize = SKB_DATA_ALIGN(size);\n+#endif\n+\tunsigned int headlen;\n+\tstruct sk_buff *skb;\n \n-\t\t/* prefetch first cache line of first page */\n-\t\tprefetch(va);\n+\t/* prefetch first cache line of first page */\n+\tprefetch(va);\n #if L1_CACHE_BYTES < 128\n-\t\tprefetch(va + L1_CACHE_BYTES);\n+\tprefetch(va + L1_CACHE_BYTES);\n #endif\n \n-\t\t/* allocate a skb to store the frags */\n-\t\tskb = netdev_alloc_skb_ip_align(rx_ring->netdev,\n-\t\t\t\t\t\tIXGBEVF_RX_HDR_SIZE);\n-\t\tif (unlikely(!skb)) {\n-\t\t\trx_ring->rx_stats.alloc_rx_buff_failed++;\n-\t\t\treturn NULL;\n-\t\t}\n+\t/* allocate a skb to store the frags */\n+\tskb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);\n+\tif (unlikely(!skb))\n+\t\treturn NULL;\n \n-\t\t/* we will be copying header into skb->data in\n-\t\t * pskb_may_pull so it is in our interest to prefetch\n-\t\t * it now to avoid a possible cache miss\n-\t\t */\n-\t\tprefetchw(skb->data);\n-\t}\n+\t/* Determine available headroom for copy */\n+\theadlen = size;\n+\tif (headlen > IXGBEVF_RX_HDR_SIZE)\n+\t\theadlen = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);\n \n-\t/* pull page into skb */\n-\tif (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {\n-\t\t/* hand second half of page back to the ring */\n-\t\tixgbevf_reuse_rx_page(rx_ring, rx_buffer);\n+\t/* align pull length to size of long to optimize memcpy performance */\n+\tmemcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));\n+\n+\t/* update all of the pointers */\n+\tsize -= headlen;\n+\tif (size) {\n+\t\tskb_add_rx_frag(skb, 0, rx_buffer->page,\n+\t\t\t\t(va + headlen) - page_address(rx_buffer->page),\n+\t\t\t\tsize, truesize);\n+#if (PAGE_SIZE < 8192)\n+\t\trx_buffer->page_offset ^= truesize;\n+#else\n+\t\trx_buffer->page_offset += truesize;\n+#endif\n \t} else {\n-\t\t/* We are not reusing the buffer so unmap it and free\n-\t\t * any references we are holding to it\n-\t\t */\n-\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n-\t\t\t\t ixgbevf_rx_pg_size(rx_ring),\n-\t\t\t\t DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);\n-\t\t__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);\n+\t\trx_buffer->pagecnt_bias++;\n \t}\n \n-\t/* clear contents of buffer_info */\n-\trx_buffer->dma = 0;\n-\trx_buffer->page = NULL;\n-\n \treturn skb;\n }\n \n@@ -929,6 +920,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,\n \n \twhile (likely(total_rx_packets < budget)) {\n \t\tunion ixgbe_adv_rx_desc *rx_desc;\n+\t\tstruct ixgbevf_rx_buffer *rx_buffer;\n+\t\tunsigned int size;\n \n \t\t/* return some buffers to hardware, one at a time is too slow */\n \t\tif (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {\n@@ -937,8 +930,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,\n \t\t}\n \n \t\trx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);\n-\n-\t\tif (!rx_desc->wb.upper.length)\n+\t\tsize = le16_to_cpu(rx_desc->wb.upper.length);\n+\t\tif (!size)\n \t\t\tbreak;\n \n \t\t/* This memory barrier is needed to keep us from reading\n@@ -947,15 +940,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,\n \t\t */\n \t\trmb();\n \n+\t\trx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);\n+\n \t\t/* retrieve a buffer from the ring */\n-\t\tskb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);\n+\t\tif 
(skb)\n+\t\t\tixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);\n+\t\telse\n+\t\t\tskb = ixgbevf_construct_skb(rx_ring, rx_buffer,\n+\t\t\t\t\t\t rx_desc, size);\n \n \t\t/* exit if we failed to retrieve a buffer */\n \t\tif (!skb) {\n \t\t\trx_ring->rx_stats.alloc_rx_buff_failed++;\n+\t\t\trx_buffer->pagecnt_bias++;\n \t\t\tbreak;\n \t\t}\n \n+\t\tixgbevf_put_rx_buffer(rx_ring, rx_buffer);\n \t\tcleaned_count++;\n \n \t\t/* fetch next buffer in frame if non-eop */\n", "prefixes": [ "8/9" ] }
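Updating a patch works the same way but requires an API token belonging to a maintainer of the project. A sketch of a partial update via PATCH, under the assumptions that token authentication uses the "Authorization: Token ..." header and that the writable fields include state, archived, and delegate as shown in the response above (the token value here is a placeholder):

import requests

# Partially update the patch via PATCH; PUT takes the same request body but
# replaces the whole writable representation instead of individual fields.
resp = requests.patch(
    "http://patchwork.ozlabs.org/api/patches/867757/",
    headers={"Authorization": "Token 0123456789abcdef0123456789abcdef"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])

The mbox URL in the response can also be fetched directly and piped to git am to apply the patch to a local tree.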