get:
Show a patch.

patch:
Partially update a patch (send only the fields to change).

put:
Update a patch (send a full representation).
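
Any HTTP client can drive these endpoints; read-only requests need no authentication. Below is a minimal sketch of the GET request shown in the example that follows, assuming Python with the requests library (the patch ID and the printed fields are taken from the example response):

import requests

# Fetch patch 700234 from the public instance; no API token is
# needed for read-only access.
resp = requests.get("http://patchwork.ozlabs.org/api/patches/700234/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])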

GET /api/patches/700234/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 700234,
    "url": "http://patchwork.ozlabs.org/api/patches/700234/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/1480377971-23412-13-git-send-email-bimmy.pujari@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<1480377971-23412-13-git-send-email-bimmy.pujari@intel.com>",
    "list_archive_url": null,
    "date": "2016-11-29T00:06:09",
    "name": "[next,S55,12/14] i40e/i40evf: elimitate i40e_pull_tail()",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "f89b4214fe62ed305c6132a972c41ce6db59dd75",
    "submitter": {
        "id": 68919,
        "url": "http://patchwork.ozlabs.org/api/people/68919/?format=api",
        "name": "Pujari, Bimmy",
        "email": "bimmy.pujari@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/1480377971-23412-13-git-send-email-bimmy.pujari@intel.com/mbox/",
    "series": [],
    "comments": "http://patchwork.ozlabs.org/api/patches/700234/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/700234/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Received": [
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3tSP5W0mcrz9tlW\n\tfor <incoming@patchwork.ozlabs.org>;\n\tTue, 29 Nov 2016 11:08:15 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 93B86888A9;\n\tTue, 29 Nov 2016 00:08:13 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id Nl7NE3OuRg7O; Tue, 29 Nov 2016 00:08:06 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id EA6F5889E0;\n\tTue, 29 Nov 2016 00:07:54 +0000 (UTC)",
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 4AF481C0975\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 29 Nov 2016 00:07:51 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 4704D93A8C\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 29 Nov 2016 00:07:51 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id kCLe6f0nARq2 for <intel-wired-lan@lists.osuosl.org>;\n\tTue, 29 Nov 2016 00:07:49 +0000 (UTC)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id 28CE8946B4\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 29 Nov 2016 00:07:49 +0000 (UTC)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby orsmga104.jf.intel.com with ESMTP; 28 Nov 2016 16:07:47 -0800",
            "from bimmy.jf.intel.com (HELO bimmy.linux1.jf.intel.com)\n\t([134.134.2.167])\n\tby orsmga004.jf.intel.com with ESMTP; 28 Nov 2016 16:07:47 -0800"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.31,565,1473145200\"; d=\"scan'208\";a=\"35284077\"",
        "From": "Bimmy Pujari <bimmy.pujari@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Mon, 28 Nov 2016 16:06:09 -0800",
        "Message-Id": "<1480377971-23412-13-git-send-email-bimmy.pujari@intel.com>",
        "X-Mailer": "git-send-email 2.4.11",
        "In-Reply-To": "<1480377971-23412-1-git-send-email-bimmy.pujari@intel.com>",
        "References": "<1480377971-23412-1-git-send-email-bimmy.pujari@intel.com>",
        "Subject": "[Intel-wired-lan] [next PATCH S55 12/14] i40e/i40evf: elimitate\n\ti40e_pull_tail()",
        "X-BeenThere": "intel-wired-lan@lists.osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>",
        "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>",
        "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>"
    },
    "content": "From: \"Scott Peterson\" <scott.d.peterson@intel.com>\n\nReorganize the i40e_pull_tail() logic, doing it in i40e_add_rx_frag()\nwhere it's cheaper.  The igb driver does this the same way.\n\nAlso renames i40e_page_is_reserved() to reflect what it actually\ntests.\n\nSigned-off-by: Scott Peterson <scott.d.peterson@intel.com>\nChange-ID: Icd9cc507aae1fcdc02308b3a09034111b4c24071\n---\nTesting Hints:\nNeeds data integrity verification (e.g. rsync with checksums twice)\n\n drivers/net/ethernet/intel/i40e/i40e_txrx.c   | 169 ++++++++++++++------------\n drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 169 ++++++++++++++------------\n 2 files changed, 186 insertions(+), 152 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\nindex 43de4c4..3e99206 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n@@ -1426,45 +1426,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_pull_tail - i40e specific version of skb_pull_tail\n- * @rx_ring: rx descriptor ring packet is being transacted on\n- * @skb: pointer to current skb being adjusted\n- *\n- * This function is an i40e specific version of __pskb_pull_tail.  The\n- * main difference between this version and the original function is that\n- * this function can make several assumptions about the state of things\n- * that allow for significant optimizations versus the standard function.\n- * As a result we can do things like drop a frag and maintain an accurate\n- * truesize for the skb.\n- */\n-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)\n-{\n-\tstruct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];\n-\tunsigned char *va;\n-\tunsigned int pull_len;\n-\n-\t/* it is valid to use page_address instead of kmap since we are\n-\t * working with pages allocated out of the lomem pool per\n-\t * alloc_page(GFP_ATOMIC)\n-\t */\n-\tva = skb_frag_address(frag);\n-\n-\t/* we need the header to contain the greater of either ETH_HLEN or\n-\t * 60 bytes if the skb->len is less than 60 for skb_pad.\n-\t */\n-\tpull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);\n-\n-\t/* align pull length to size of long to optimize memcpy performance */\n-\tskb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));\n-\n-\t/* update all of the pointers */\n-\tskb_frag_size_sub(frag, pull_len);\n-\tfrag->page_offset += pull_len;\n-\tskb->data_len -= pull_len;\n-\tskb->tail += pull_len;\n-}\n-\n-/**\n  * i40e_cleanup_headers - Correct empty headers\n  * @rx_ring: rx descriptor ring packet is being transacted on\n  * @skb: pointer to current skb being fixed\n@@ -1479,10 +1440,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)\n  **/\n static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)\n {\n-\t/* place header in linear portion of buffer */\n-\tif (skb_is_nonlinear(skb))\n-\t\ti40e_pull_tail(rx_ring, skb);\n-\n \t/* if eth_skb_pad returns an error the skb was freed */\n \tif (eth_skb_pad(skb))\n \t\treturn true;\n@@ -1514,12 +1471,78 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_page_is_reserved - check if reuse is possible\n+ * i40e_page_is_reusable - check if any reuse is possible\n  * @page: page struct to check\n+ *\n+ * A page is not reusable if it was allocated under low memory\n+ * conditions, or it's not in the same NUMA node as this CPU.\n  */\n-static inline bool i40e_page_is_reserved(struct page *page)\n+static inline bool i40e_page_is_reusable(struct page *page)\n {\n-\treturn (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);\n+\treturn (page_to_nid(page) == numa_mem_id()) &&\n+\t\t!page_is_pfmemalloc(page);\n+}\n+\n+/**\n+ * i40e_can_reuse_rx_page - Determine if this page can be reused by\n+ * the adapter for another receive\n+ *\n+ * @rx_buffer: buffer containing the page\n+ * @page: page address from rx_buffer\n+ * @truesize: actual size of the buffer in this page\n+ *\n+ * If page is reusable, rx_buffer->page_offset is adjusted to point to\n+ * an unused region in the page.\n+ *\n+ * For small pages, @truesize will be a constant value, half 
the size\n+ * of the memory at page.  We'll attempt to alternate between high and\n+ * low halves of the page, with one half ready for use by the hardware\n+ * and the other half being consumed by the stack.  We use the page\n+ * ref count to determine whether the stack has finished consuming the\n+ * portion of this page that was passed up with a previous packet.  If\n+ * the page ref count is >1, we'll assume the \"other\" half page is\n+ * still busy, and this page cannot be reused.\n+ *\n+ * For larger pages, @truesize will be the actual space used by the\n+ * received packet (adjusted upward to an even multiple of the cache\n+ * line size).  This will advance through the page by the amount\n+ * actually consumed by the received packets while there is still\n+ * space for a buffer.  Each region of larger pages will be used at\n+ * most once, after which the page will not be reused.\n+ *\n+ * In either case, if the page is reusable its refcount is increased.\n+ **/\n+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n+\t\t\t\t   struct page *page,\n+\t\t\t\t   const unsigned int truesize)\n+{\n+#if (PAGE_SIZE >= 8192)\n+\tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n+#endif\n+\n+\t/* Is any reuse possible? */\n+\tif (unlikely(!i40e_page_is_reusable(page)))\n+\t\treturn false;\n+\n+#if (PAGE_SIZE < 8192)\n+\t/* if we are only owner of page we can reuse it */\n+\tif (unlikely(page_count(page) != 1))\n+\t\treturn false;\n+\n+\t/* flip page offset to other buffer */\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\t/* move offset up to the next cache line */\n+\trx_buffer->page_offset += truesize;\n+\n+\tif (rx_buffer->page_offset > last_offset)\n+\t\treturn false;\n+#endif\n+\n+\t/* Inc ref count on page before passing it up to the stack */\n+\tget_page(page);\n+\n+\treturn true;\n }\n \n /**\n@@ -1543,23 +1566,25 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\t\t     struct sk_buff *skb)\n {\n \tstruct page *page = rx_buffer->page;\n+\tunsigned char *va = page_address(page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n \tunsigned int truesize = I40E_RXBUFFER_2048;\n #else\n \tunsigned int truesize = ALIGN(size, L1_CACHE_BYTES);\n-\tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n #endif\n+\tunsigned int pull_len;\n+\n+\tif (unlikely(skb_is_nonlinear(skb)))\n+\t\tgoto add_tail_frag;\n \n \t/* will the data fit in the skb we allocated? 
if so, just\n \t * copy it as it is pretty small anyway\n \t */\n-\tif ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {\n-\t\tunsigned char *va = page_address(page) + rx_buffer->page_offset;\n-\n+\tif (size <= I40E_RX_HDR_SIZE) {\n \t\tmemcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));\n \n-\t\t/* page is not reserved, we can reuse buffer as-is */\n-\t\tif (likely(!i40e_page_is_reserved(page)))\n+\t\t/* page is reusable, we can reuse buffer as-is */\n+\t\tif (likely(i40e_page_is_reusable(page)))\n \t\t\treturn true;\n \n \t\t/* this page cannot be reused so discard it */\n@@ -1567,34 +1592,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\treturn false;\n \t}\n \n-\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n-\t\t\trx_buffer->page_offset, size, truesize);\n-\n-\t/* avoid re-using remote pages */\n-\tif (unlikely(i40e_page_is_reserved(page)))\n-\t\treturn false;\n-\n-#if (PAGE_SIZE < 8192)\n-\t/* if we are only owner of page we can reuse it */\n-\tif (unlikely(page_count(page) != 1))\n-\t\treturn false;\n+\t/* we need the header to contain the greater of either\n+\t * ETH_HLEN or 60 bytes if the skb->len is less than\n+\t * 60 for skb_pad.\n+\t */\n+\tpull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);\n \n-\t/* flip page offset to other buffer */\n-\trx_buffer->page_offset ^= truesize;\n-#else\n-\t/* move offset up to the next cache line */\n-\trx_buffer->page_offset += truesize;\n+\t/* align pull length to size of long to optimize\n+\t * memcpy performance\n+\t */\n+\tmemcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));\n \n-\tif (rx_buffer->page_offset > last_offset)\n-\t\treturn false;\n-#endif\n+\t/* update all of the pointers */\n+\tva += pull_len;\n+\tsize -= pull_len;\n \n-\t/* Even if we own the page, we are not allowed to use atomic_set()\n-\t * This would break get_page_unless_zero() users.\n-\t */\n-\tget_page(rx_buffer->page);\n+add_tail_frag:\n+\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n+\t\t\t(unsigned long)va & ~PAGE_MASK, size, truesize);\n \n-\treturn true;\n+\treturn i40e_can_reuse_rx_page(rx_buffer, page, truesize);\n }\n \n /**\ndiff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\nindex 04c266c..4870cb5 100644\n--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n@@ -904,45 +904,6 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_pull_tail - i40e specific version of skb_pull_tail\n- * @rx_ring: rx descriptor ring packet is being transacted on\n- * @skb: pointer to current skb being adjusted\n- *\n- * This function is an i40e specific version of __pskb_pull_tail.  
The\n- * main difference between this version and the original function is that\n- * this function can make several assumptions about the state of things\n- * that allow for significant optimizations versus the standard function.\n- * As a result we can do things like drop a frag and maintain an accurate\n- * truesize for the skb.\n- */\n-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)\n-{\n-\tstruct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];\n-\tunsigned char *va;\n-\tunsigned int pull_len;\n-\n-\t/* it is valid to use page_address instead of kmap since we are\n-\t * working with pages allocated out of the lomem pool per\n-\t * alloc_page(GFP_ATOMIC)\n-\t */\n-\tva = skb_frag_address(frag);\n-\n-\t/* we need the header to contain the greater of either ETH_HLEN or\n-\t * 60 bytes if the skb->len is less than 60 for skb_pad.\n-\t */\n-\tpull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);\n-\n-\t/* align pull length to size of long to optimize memcpy performance */\n-\tskb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));\n-\n-\t/* update all of the pointers */\n-\tskb_frag_size_sub(frag, pull_len);\n-\tfrag->page_offset += pull_len;\n-\tskb->data_len -= pull_len;\n-\tskb->tail += pull_len;\n-}\n-\n-/**\n  * i40e_cleanup_headers - Correct empty headers\n  * @rx_ring: rx descriptor ring packet is being transacted on\n  * @skb: pointer to current skb being fixed\n@@ -957,10 +918,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)\n  **/\n static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)\n {\n-\t/* place header in linear portion of buffer */\n-\tif (skb_is_nonlinear(skb))\n-\t\ti40e_pull_tail(rx_ring, skb);\n-\n \t/* if eth_skb_pad returns an error the skb was freed */\n \tif (eth_skb_pad(skb))\n \t\treturn true;\n@@ -992,12 +949,78 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,\n }\n \n /**\n- * i40e_page_is_reserved - check if reuse is possible\n+ * i40e_page_is_reusable - check if any reuse is possible\n  * @page: page struct to check\n+ *\n+ * A page is not reusable if it was allocated under low memory\n+ * conditions, or it's not in the same NUMA node as this CPU.\n  */\n-static inline bool i40e_page_is_reserved(struct page *page)\n+static inline bool i40e_page_is_reusable(struct page *page)\n {\n-\treturn (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);\n+\treturn (page_to_nid(page) == numa_mem_id()) &&\n+\t\t!page_is_pfmemalloc(page);\n+}\n+\n+/**\n+ * i40e_can_reuse_rx_page - Determine if this page can be reused by\n+ * the adapter for another receive\n+ *\n+ * @rx_buffer: buffer containing the page\n+ * @page: page address from rx_buffer\n+ * @truesize: actual size of the buffer in this page\n+ *\n+ * If page is reusable, rx_buffer->page_offset is adjusted to point to\n+ * an unused region in the page.\n+ *\n+ * For small pages, @truesize will be a constant value, half the size\n+ * of the memory at page.  We'll attempt to alternate between high and\n+ * low halves of the page, with one half ready for use by the hardware\n+ * and the other half being consumed by the stack.  We use the page\n+ * ref count to determine whether the stack has finished consuming the\n+ * portion of this page that was passed up with a previous packet.  
If\n+ * the page ref count is >1, we'll assume the \"other\" half page is\n+ * still busy, and this page cannot be reused.\n+ *\n+ * For larger pages, @truesize will be the actual space used by the\n+ * received packet (adjusted upward to an even multiple of the cache\n+ * line size).  This will advance through the page by the amount\n+ * actually consumed by the received packets while there is still\n+ * space for a buffer.  Each region of larger pages will be used at\n+ * most once, after which the page will not be reused.\n+ *\n+ * In either case, if the page is reusable its refcount is increased.\n+ **/\n+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,\n+\t\t\t\t   struct page *page,\n+\t\t\t\t   const unsigned int truesize)\n+{\n+#if (PAGE_SIZE >= 8192)\n+\tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n+#endif\n+\n+\t/* Is any reuse possible? */\n+\tif (unlikely(!i40e_page_is_reusable(page)))\n+\t\treturn false;\n+\n+#if (PAGE_SIZE < 8192)\n+\t/* if we are only owner of page we can reuse it */\n+\tif (unlikely(page_count(page) != 1))\n+\t\treturn false;\n+\n+\t/* flip page offset to other buffer */\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\t/* move offset up to the next cache line */\n+\trx_buffer->page_offset += truesize;\n+\n+\tif (rx_buffer->page_offset > last_offset)\n+\t\treturn false;\n+#endif\n+\n+\t/* Inc ref count on page before passing it up to the stack */\n+\tget_page(page);\n+\n+\treturn true;\n }\n \n /**\n@@ -1021,23 +1044,25 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\t\t     struct sk_buff *skb)\n {\n \tstruct page *page = rx_buffer->page;\n+\tunsigned char *va = page_address(page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n \tunsigned int truesize = I40E_RXBUFFER_2048;\n #else\n \tunsigned int truesize = ALIGN(size, L1_CACHE_BYTES);\n-\tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n #endif\n+\tunsigned int pull_len;\n+\n+\tif (unlikely(skb_is_nonlinear(skb)))\n+\t\tgoto add_tail_frag;\n \n \t/* will the data fit in the skb we allocated? 
if so, just\n \t * copy it as it is pretty small anyway\n \t */\n-\tif ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {\n-\t\tunsigned char *va = page_address(page) + rx_buffer->page_offset;\n-\n+\tif (size <= I40E_RX_HDR_SIZE) {\n \t\tmemcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));\n \n-\t\t/* page is not reserved, we can reuse buffer as-is */\n-\t\tif (likely(!i40e_page_is_reserved(page)))\n+\t\t/* page is reusable, we can reuse buffer as-is */\n+\t\tif (likely(i40e_page_is_reusable(page)))\n \t\t\treturn true;\n \n \t\t/* this page cannot be reused so discard it */\n@@ -1045,34 +1070,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\treturn false;\n \t}\n \n-\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n-\t\t\trx_buffer->page_offset, size, truesize);\n-\n-\t/* avoid re-using remote pages */\n-\tif (unlikely(i40e_page_is_reserved(page)))\n-\t\treturn false;\n-\n-#if (PAGE_SIZE < 8192)\n-\t/* if we are only owner of page we can reuse it */\n-\tif (unlikely(page_count(page) != 1))\n-\t\treturn false;\n+\t/* we need the header to contain the greater of either\n+\t * ETH_HLEN or 60 bytes if the skb->len is less than\n+\t * 60 for skb_pad.\n+\t */\n+\tpull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);\n \n-\t/* flip page offset to other buffer */\n-\trx_buffer->page_offset ^= truesize;\n-#else\n-\t/* move offset up to the next cache line */\n-\trx_buffer->page_offset += truesize;\n+\t/* align pull length to size of long to optimize\n+\t * memcpy performance\n+\t */\n+\tmemcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));\n \n-\tif (rx_buffer->page_offset > last_offset)\n-\t\treturn false;\n-#endif\n+\t/* update all of the pointers */\n+\tva += pull_len;\n+\tsize -= pull_len;\n \n-\t/* Even if we own the page, we are not allowed to use atomic_set()\n-\t * This would break get_page_unless_zero() users.\n-\t */\n-\tget_page(rx_buffer->page);\n+add_tail_frag:\n+\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,\n+\t\t\t(unsigned long)va & ~PAGE_MASK, size, truesize);\n \n-\treturn true;\n+\treturn i40e_can_reuse_rx_page(rx_buffer, page, truesize);\n }\n \n /**\n",
    "prefixes": [
        "next",
        "S55",
        "12/14"
    ]
}
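
PATCH and PUT follow the usual REST semantics: PATCH submits only the fields to change, while PUT submits a full representation. Below is a minimal sketch of changing the "state" field via PATCH, assuming Python with the requests library and Patchwork's token authentication; the token value is a placeholder, writes require sufficient rights on the project, and valid state slugs are project-specific.

import requests

# Partially update the patch: only the "state" field is sent.
resp = requests.patch(
    "http://patchwork.ozlabs.org/api/patches/700234/",
    headers={"Authorization": "Token YOUR_API_TOKEN"},  # placeholder token
    json={"state": "under-review"},  # assumed valid state slug
)
resp.raise_for_status()
print(resp.json()["state"])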