get:
Show a patch.

patch:
Partially update a patch (send only the fields to change).

put:
Update a patch.
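For example, a client might drive these endpoints as follows. This is a
minimal sketch using Python's requests library; the token value and the
chosen state are placeholders, and writes are only accepted from an
authenticated account with the right permissions on the project:

    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    TOKEN = "0123456789abcdef"  # placeholder API token

    # get: show a patch (reads need no authentication)
    patch = requests.get(f"{BASE}/patches/747505/").json()
    print(patch["name"], patch["state"])

    # patch: partially update a patch, sending only the changed fields
    resp = requests.patch(
        f"{BASE}/patches/747505/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()

The exchange below shows the full response that a GET returns for patch 747505.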

GET /api/patches/747505/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 747505,
    "url": "http://patchwork.ozlabs.org/api/patches/747505/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170405115103.67374-9-alice.michael@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20170405115103.67374-9-alice.michael@intel.com>",
    "list_archive_url": null,
    "date": "2017-04-05T11:51:01",
    "name": "[next,S66,v2,09/11] i40e/i40evf: Add support for using order 1 pages with a 3K buffer",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "cccfe8604f5079095e7b44e61eb73b015c8a1b22",
    "submitter": {
        "id": 71123,
        "url": "http://patchwork.ozlabs.org/api/people/71123/?format=api",
        "name": "Michael, Alice",
        "email": "alice.michael@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170405115103.67374-9-alice.michael@intel.com/mbox/",
    "series": [],
    "comments": "http://patchwork.ozlabs.org/api/patches/747505/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/747505/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Received": [
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3vyxP50c1Tz9s8c\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu,  6 Apr 2017 05:54:01 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 91D058615E;\n\tWed,  5 Apr 2017 19:53:59 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id r1VBWrmXFYOp; Wed,  5 Apr 2017 19:53:53 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 719EE863E8;\n\tWed,  5 Apr 2017 19:53:51 +0000 (UTC)",
            "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\tby ash.osuosl.org (Postfix) with ESMTP id CEB011C0100\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  5 Apr 2017 19:53:48 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id C924486B9D\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  5 Apr 2017 19:53:48 +0000 (UTC)",
            "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id lr6nv7NsziZt for <intel-wired-lan@lists.osuosl.org>;\n\tWed,  5 Apr 2017 19:53:47 +0000 (UTC)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby fraxinus.osuosl.org (Postfix) with ESMTPS id C573786BCA\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed,  5 Apr 2017 19:53:47 +0000 (UTC)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t05 Apr 2017 12:53:47 -0700",
            "from unknown (HELO localhost.jf.intel.com) ([10.166.16.121])\n\tby orsmga003.jf.intel.com with ESMTP; 05 Apr 2017 12:53:47 -0700"
        ],
        "Authentication-Results": "ozlabs.org;\n\tdkim=fail reason=\"key not found in DNS\" (0-bit key;\n\tunprotected) header.d=intel.com header.i=@intel.com\n\theader.b=\"RK1UdoFP\"; dkim-atps=neutral",
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=simple/simple;\n\td=intel.com; i=@intel.com; q=dns/txt; s=intel;\n\tt=1491422027; x=1522958027;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=ByTpsXMojHTR+GDV7jIAh/wsJqrkN796EaFWV1PpBJY=;\n\tb=RK1UdoFP0SCx2ZCWWinMadg7oZ6u7nQ34JoW6YtLUgMjb0H6GObEZOb4\n\tZc8KBcwj3kQqg8nF93LOOZuKh+Fo/A==;",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.37,280,1488873600\"; d=\"scan'208\";a=\"952301604\"",
        "From": "Alice Michael <alice.michael@intel.com>",
        "To": "alice.michael@intel.com,\n\tintel-wired-lan@lists.osuosl.org",
        "Date": "Wed,  5 Apr 2017 07:51:01 -0400",
        "Message-Id": "<20170405115103.67374-9-alice.michael@intel.com>",
        "X-Mailer": "git-send-email 2.9.3",
        "In-Reply-To": "<20170405115103.67374-1-alice.michael@intel.com>",
        "References": "<20170405115103.67374-1-alice.michael@intel.com>",
        "Subject": "[Intel-wired-lan] [next S66 v2 09/11] i40e/i40evf: Add support for\n\tusing order 1 pages with a 3K buffer",
        "X-BeenThere": "intel-wired-lan@lists.osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>",
        "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>",
        "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>"
    },
    "content": "From: Alexander Duyck <alexander.h.duyck@intel.com>\n\nThere are situations where adding padding to the front and back of an Rx\nbuffer will require that we add additional padding.  Specifically if\nNET_IP_ALIGN is non-zero, or the MTU size is larger than 7.5K we would need\nto use 2K buffers which leaves us with no room for the padding.\n\nTo preemptively address these cases I am adding support for 3K buffers to\nthe Rx path so that we can provide the additional padding needed in the\nevent of NET_IP_ALIGN being non-zero or a cache line being greater than 64.\n\nSigned-off-by: Alexander Duyck <alexander.h.duyck@intel.com>\nChange-ID: I938bc1ba611285428df39a613cd66f98e60b55c7\n---\n drivers/net/ethernet/intel/i40e/i40e_main.c     |  3 ++-\n drivers/net/ethernet/intel/i40e/i40e_txrx.c     | 27 +++++++++++++------------\n drivers/net/ethernet/intel/i40e/i40e_txrx.h     | 12 +++++++++++\n drivers/net/ethernet/intel/i40evf/i40e_txrx.c   | 27 +++++++++++++------------\n drivers/net/ethernet/intel/i40evf/i40e_txrx.h   | 12 +++++++++++\n drivers/net/ethernet/intel/i40evf/i40evf_main.c |  6 ++++++\n 6 files changed, 60 insertions(+), 27 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c\nindex 89f3659..859c8e1 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_main.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c\n@@ -3085,7 +3085,8 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)\n #endif\n \t} else {\n \t\tvsi->max_frame = I40E_MAX_RXBUFFER;\n-\t\tvsi->rx_buf_len = I40E_RXBUFFER_2048;\n+\t\tvsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :\n+\t\t\t\t\t\t       I40E_RXBUFFER_2048;\n \t}\n \n \t/* set up individual rings */\ndiff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\nindex e95428c..bee1672 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n@@ -1138,14 +1138,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)\n \t\tdma_sync_single_range_for_cpu(rx_ring->dev,\n \t\t\t\t\t      rx_bi->dma,\n \t\t\t\t\t      rx_bi->page_offset,\n-\t\t\t\t\t      I40E_RXBUFFER_2048,\n+\t\t\t\t\t      rx_ring->rx_buf_len,\n \t\t\t\t\t      DMA_FROM_DEVICE);\n \n \t\t/* free resources associated with mapping */\n \t\tdma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,\n-\t\t\t\t     PAGE_SIZE,\n+\t\t\t\t     i40e_rx_pg_size(rx_ring),\n \t\t\t\t     DMA_FROM_DEVICE,\n \t\t\t\t     I40E_RX_DMA_ATTR);\n+\n \t\t__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);\n \n \t\trx_bi->page = NULL;\n@@ -1267,7 +1268,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \t}\n \n \t/* alloc new page for storage */\n-\tpage = dev_alloc_page();\n+\tpage = dev_alloc_pages(i40e_rx_pg_order(rx_ring));\n \tif (unlikely(!page)) {\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n@@ -1275,7 +1276,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \n \t/* map page for use */\n \tdma = dma_map_page_attrs(rx_ring->dev, page, 0,\n-\t\t\t\t PAGE_SIZE,\n+\t\t\t\t i40e_rx_pg_size(rx_ring),\n \t\t\t\t DMA_FROM_DEVICE,\n \t\t\t\t I40E_RX_DMA_ATTR);\n \n@@ -1283,7 +1284,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \t * there isn't much point in holding memory we can't use\n \t */\n \tif (dma_mapping_error(rx_ring->dev, dma)) {\n-\t\t__free_pages(page, 0);\n+\t\t__free_pages(page, i40e_rx_pg_order(rx_ring));\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n \t}\n@@ -1343,7 +1344,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)\n \t\t/* sync the buffer for use by the device */\n \t\tdma_sync_single_range_for_device(rx_ring->dev, bi->dma,\n \t\t\t\t\t\t bi->page_offset,\n-\t\t\t\t\t\t I40E_RXBUFFER_2048,\n+\t\t\t\t\t\t rx_ring->rx_buf_len,\n \t\t\t\t\t\t DMA_FROM_DEVICE);\n \n \t\t/* Refresh the desc even if buffer_addrs didn't change\n@@ -1645,9 +1646,6 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  **/\n static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n {\n-#if (PAGE_SIZE >= 8192)\n-\tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n-#endif\n \tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n \tstruct page *page = rx_buffer->page;\n \n@@ -1660,7 +1658,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n \tif (unlikely((page_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n #else\n-\tif (rx_buffer->page_offset > last_offset)\n+#define I40E_LAST_OFFSET \\\n+\t(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)\n+\tif (rx_buffer->page_offset > I40E_LAST_OFFSET)\n 
\t\treturn false;\n #endif\n \n@@ -1694,7 +1694,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\t\t     unsigned int size)\n {\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = I40E_RXBUFFER_2048;\n+\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n #else\n \tunsigned int truesize = SKB_DATA_ALIGN(size);\n #endif\n@@ -1755,7 +1755,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,\n {\n \tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = I40E_RXBUFFER_2048;\n+\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n #else\n \tunsigned int truesize = SKB_DATA_ALIGN(size);\n #endif\n@@ -1821,7 +1821,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n \t\trx_ring->rx_stats.page_reuse_count++;\n \t} else {\n \t\t/* we are not reusing the buffer so unmap it */\n-\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,\n+\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n+\t\t\t\t     i40e_rx_pg_size(rx_ring),\n \t\t\t\t     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);\n \t\t__page_frag_cache_drain(rx_buffer->page,\n \t\t\t\t\trx_buffer->pagecnt_bias);\ndiff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h\nindex bc66ec4..2f61853 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h\n@@ -119,6 +119,7 @@ enum i40e_dyn_idx_t {\n #define I40E_RXBUFFER_256   256\n #define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */\n #define I40E_RXBUFFER_2048  2048\n+#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */\n #define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */\n \n /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we\n@@ -389,6 +390,17 @@ struct i40e_ring_container {\n #define i40e_for_each_ring(pos, head) \\\n \tfor (pos = (head).ring; pos != NULL; pos = pos->next)\n \n+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)\n+{\n+#if (PAGE_SIZE < 8192)\n+\tif (ring->rx_buf_len > (PAGE_SIZE / 2))\n+\t\treturn 1;\n+#endif\n+\treturn 0;\n+}\n+\n+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))\n+\n bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);\n netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);\n void i40e_clean_tx_ring(struct i40e_ring *tx_ring);\ndiff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\nindex 95e383a..6b60c19 100644\n--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n@@ -509,14 +509,15 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)\n \t\tdma_sync_single_range_for_cpu(rx_ring->dev,\n \t\t\t\t\t      rx_bi->dma,\n \t\t\t\t\t      rx_bi->page_offset,\n-\t\t\t\t\t      I40E_RXBUFFER_2048,\n+\t\t\t\t\t      rx_ring->rx_buf_len,\n \t\t\t\t\t      DMA_FROM_DEVICE);\n \n \t\t/* free resources associated with mapping */\n \t\tdma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,\n-\t\t\t\t     PAGE_SIZE,\n+\t\t\t\t     i40e_rx_pg_size(rx_ring),\n \t\t\t\t     DMA_FROM_DEVICE,\n \t\t\t\t     I40E_RX_DMA_ATTR);\n+\n \t\t__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);\n \n \t\trx_bi->page = NULL;\n@@ -638,7 +639,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \t}\n \n \t/* alloc new page for storage */\n-\tpage = dev_alloc_page();\n+\tpage = 
dev_alloc_pages(i40e_rx_pg_order(rx_ring));\n \tif (unlikely(!page)) {\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n@@ -646,7 +647,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \n \t/* map page for use */\n \tdma = dma_map_page_attrs(rx_ring->dev, page, 0,\n-\t\t\t\t PAGE_SIZE,\n+\t\t\t\t i40e_rx_pg_size(rx_ring),\n \t\t\t\t DMA_FROM_DEVICE,\n \t\t\t\t I40E_RX_DMA_ATTR);\n \n@@ -654,7 +655,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,\n \t * there isn't much point in holding memory we can't use\n \t */\n \tif (dma_mapping_error(rx_ring->dev, dma)) {\n-\t\t__free_pages(page, 0);\n+\t\t__free_pages(page, i40e_rx_pg_order(rx_ring));\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n \t}\n@@ -714,7 +715,7 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)\n \t\t/* sync the buffer for use by the device */\n \t\tdma_sync_single_range_for_device(rx_ring->dev, bi->dma,\n \t\t\t\t\t\t bi->page_offset,\n-\t\t\t\t\t\t I40E_RXBUFFER_2048,\n+\t\t\t\t\t\t rx_ring->rx_buf_len,\n \t\t\t\t\t\t DMA_FROM_DEVICE);\n \n \t\t/* Refresh the desc even if buffer_addrs didn't change\n@@ -1006,9 +1007,6 @@ static inline bool i40e_page_is_reusable(struct page *page)\n  **/\n static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n {\n-#if (PAGE_SIZE >= 8192)\n-\tunsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;\n-#endif\n \tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n \tstruct page *page = rx_buffer->page;\n \n@@ -1021,7 +1019,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)\n \tif (unlikely((page_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n #else\n-\tif (rx_buffer->page_offset > last_offset)\n+#define I40E_LAST_OFFSET \\\n+\t(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)\n+\tif (rx_buffer->page_offset > I40E_LAST_OFFSET)\n \t\treturn false;\n #endif\n \n@@ -1055,7 +1055,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,\n \t\t\t     unsigned int size)\n {\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = I40E_RXBUFFER_2048;\n+\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n #else\n \tunsigned int truesize = SKB_DATA_ALIGN(size);\n #endif\n@@ -1116,7 +1116,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,\n {\n \tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n #if (PAGE_SIZE < 8192)\n-\tunsigned int truesize = I40E_RXBUFFER_2048;\n+\tunsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;\n #else\n \tunsigned int truesize = SKB_DATA_ALIGN(size);\n #endif\n@@ -1182,7 +1182,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,\n \t\trx_ring->rx_stats.page_reuse_count++;\n \t} else {\n \t\t/* we are not reusing the buffer so unmap it */\n-\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,\n+\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n+\t\t\t\t     i40e_rx_pg_size(rx_ring),\n \t\t\t\t     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);\n \t\t__page_frag_cache_drain(rx_buffer->page,\n \t\t\t\t\trx_buffer->pagecnt_bias);\ndiff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h\nindex 3bb4d73..dc82f65 100644\n--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h\n+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h\n@@ -106,6 +106,7 @@ enum i40e_dyn_idx_t {\n #define I40E_RXBUFFER_256   256\n #define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */\n #define I40E_RXBUFFER_2048  
2048\n+#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */\n #define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */\n \n /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we\n@@ -376,6 +377,17 @@ struct i40e_ring_container {\n #define i40e_for_each_ring(pos, head) \\\n \tfor (pos = (head).ring; pos != NULL; pos = pos->next)\n \n+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)\n+{\n+#if (PAGE_SIZE < 8192)\n+\tif (ring->rx_buf_len > (PAGE_SIZE / 2))\n+\t\treturn 1;\n+#endif\n+\treturn 0;\n+}\n+\n+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))\n+\n bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);\n netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);\n void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);\ndiff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c\nindex 2f8f599..95d5c04 100644\n--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c\n+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c\n@@ -694,6 +694,12 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)\n \t/* Legacy Rx will always default to a 2048 buffer size. */\n #if (PAGE_SIZE < 8192)\n \tif (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {\n+\t\t/* For jumbo frames on systems with 4K pages we have to use\n+\t\t * an order 1 page, so we might as well increase the size\n+\t\t * of our Rx buffer to make better use of the available space\n+\t\t */\n+\t\trx_buf_len = I40E_RXBUFFER_3072;\n+\n \t\t/* We use a 1536 buffer size for configurations with\n \t\t * standard Ethernet mtu.  On x86 this gives us enough room\n \t\t * for shared info and 192 bytes of padding.\n",
    "prefixes": [
        "next",
        "S66",
        "v2",
        "09/11"
    ]
}
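
The mbox URL in the response points at the raw patch email, so applying a
patch fetched from the API can be scripted directly. A minimal sketch in
the same vein as above (the output filename is arbitrary):

    import requests

    patch = requests.get(
        "http://patchwork.ozlabs.org/api/patches/747505/"
    ).json()

    # Fetch the raw mbox and save it where "git am" can pick it up
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("747505.mbox", "wb") as f:
        f.write(mbox.content)

    # Afterwards, from the target git tree: git am 747505.mbox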