Patch Detail
get:
Show a patch.
patch:
Partially update a patch (only the fields supplied in the request are changed).
put:
Fully update a patch (replaces the writable fields of the resource).
GET /api/patches/1183248/?format=api
{ "id": 1183248, "url": "http://patchwork.ozlabs.org/api/patches/1183248/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191024081125.6711-6-anthony.l.nguyen@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20191024081125.6711-6-anthony.l.nguyen@intel.com>", "list_archive_url": null, "date": "2019-10-24T08:11:22", "name": "[S30,v3,6/9] ice: introduce legacy Rx flag", "commit_ref": null, "pull_url": null, "state": "accepted", "archived": false, "hash": "0a08ba8b1b3ccac72193f28b968286e6a543ffb5", "submitter": { "id": 68875, "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api", "name": "Tony Nguyen", "email": "anthony.l.nguyen@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191024081125.6711-6-anthony.l.nguyen@intel.com/mbox/", "series": [ { "id": 138433, "url": "http://patchwork.ozlabs.org/api/series/138433/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=138433", "date": "2019-10-24T08:11:25", "name": "[S30,v3,1/9] ice: Introduce ice_base.c", "version": 3, "mbox": "http://patchwork.ozlabs.org/series/138433/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1183248/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1183248/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ 
"incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.136;\n\thelo=silver.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 46zY1T0snRz9sR0\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 25 Oct 2019 03:42:04 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 12D5B230A4;\n\tThu, 24 Oct 2019 16:42:03 +0000 (UTC)", "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id x0hS8koCWTXE; Thu, 24 Oct 2019 16:42:00 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id 60AB722F4C;\n\tThu, 24 Oct 2019 16:42:00 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id B3D271BF860\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Oct 2019 16:41:56 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id B0D4586DE2\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Oct 2019 16:41:56 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id Cmpfleg3-IGR for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Oct 2019 16:41:55 +0000 (UTC)", "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id 
8D763870C7\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Oct 2019 16:41:55 +0000 (UTC)", "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t24 Oct 2019 09:41:54 -0700", "from unknown (HELO localhost.jf.intel.com) ([10.166.244.174])\n\tby orsmga002.jf.intel.com with ESMTP; 24 Oct 2019 09:41:53 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.68,225,1569308400\"; d=\"scan'208\";a=\"210438731\"", "From": "Tony Nguyen <anthony.l.nguyen@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Thu, 24 Oct 2019 01:11:22 -0700", "Message-Id": "<20191024081125.6711-6-anthony.l.nguyen@intel.com>", "X-Mailer": "git-send-email 2.20.1", "In-Reply-To": "<20191024081125.6711-1-anthony.l.nguyen@intel.com>", "References": "<20191024081125.6711-1-anthony.l.nguyen@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH S30 v3 6/9] ice: introduce legacy Rx flag", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": 
"intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>\n\nAdd an ethtool \"legacy-rx\" priv flag for toggling the Rx path. This\ncontrol knob will be mainly used for build_skb usage as well as buffer\nsize/MTU manipulation.\n\nIn preparation for adding build_skb support in a way that it takes\ncare of how we set the values of max_frame and rx_buf_len fields of\nstruct ice_vsi. Specifically, in this patch mentioned fields are set to\nvalues that will allow us to provide headroom and tailroom in-place.\n\nThis can be mostly broken down onto following:\n- for legacy-rx \"on\" ethtool control knob, old behaviour is kept;\n- for standard 1500 MTU size configure the buffer of size 1536, as\n network stack is expecting the NET_SKB_PAD to be provided and\n NET_IP_ALIGN can have a non-zero value (these can be typically equal\n to 32 and 2, respectively);\n- for larger MTUs go with max_frame set to 9k and configure the 3k\n buffer in case when PAGE_SIZE of underlying arch is less than 8k; 3k\n buffer is implying the need for order 1 page, so that our page\n recycling scheme can still be applied;\n\nWith that said, substitute the hardcoded ICE_RXBUF_2048 and PAGE_SIZE\nvalues in DMA API that we're making use of with rx_ring->rx_buf_len and\nice_rx_pg_size(rx_ring). The latter is an introduced helper for\ndetermining the page size based on its order (which was figured out via\nice_rx_pg_order). 
Last but not least, take care of truesize calculation.\n\nIn the followup patch the headroom/tailroom computation logic will be\nintroduced.\n\nThis change aligns the buffer and frame configuration with other Intel\ndrivers, most importantly with iavf.\n\nSigned-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice.h | 1 +\n drivers/net/ethernet/intel/ice/ice_ethtool.c | 6 +++\n drivers/net/ethernet/intel/ice/ice_lib.c | 22 +++++++---\n drivers/net/ethernet/intel/ice/ice_txrx.c | 46 ++++++++++++--------\n drivers/net/ethernet/intel/ice/ice_txrx.h | 13 ++++++\n 5 files changed, 63 insertions(+), 25 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex b78ac65fa935..f48c3ab28ded 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -337,6 +337,7 @@ enum ice_pf_flags {\n \tICE_FLAG_NO_MEDIA,\n \tICE_FLAG_FW_LLDP_AGENT,\n \tICE_FLAG_ETHTOOL_CTXT,\t\t/* set when ethtool holds RTNL lock */\n+\tICE_FLAG_LEGACY_RX,\n \tICE_PF_FLAGS_NBITS\t\t/* must be last */\n };\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c\nindex 42b032620f66..c1737625bbc2 100644\n--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c\n+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c\n@@ -156,6 +156,7 @@ struct ice_priv_flag {\n static const struct ice_priv_flag ice_gstrings_priv_flags[] = {\n \tICE_PRIV_FLAG(\"link-down-on-close\", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),\n \tICE_PRIV_FLAG(\"fw-lldp-agent\", ICE_FLAG_FW_LLDP_AGENT),\n+\tICE_PRIV_FLAG(\"legacy-rx\", ICE_FLAG_LEGACY_RX),\n };\n \n #define ICE_PRIV_FLAG_ARRAY_SIZE\tARRAY_SIZE(ice_gstrings_priv_flags)\n@@ -1256,6 +1257,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)\n \t\t\t\t\t\"Fail to enable MIB change events\\n\");\n \t\t}\n \t}\n+\tif 
(test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {\n+\t\t/* down and up VSI so that changes of Rx cfg are reflected. */\n+\t\tice_down(vsi);\n+\t\tice_up(vsi);\n+\t}\n \tclear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);\n \treturn ret;\n }\ndiff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c\nindex 056ab8d3d052..d54ba4bbebf8 100644\n--- a/drivers/net/ethernet/intel/ice/ice_lib.c\n+++ b/drivers/net/ethernet/intel/ice/ice_lib.c\n@@ -1225,12 +1225,22 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)\n */\n void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)\n {\n-\tif (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)\n-\t\tvsi->max_frame = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;\n-\telse\n-\t\tvsi->max_frame = ICE_RXBUF_2048;\n-\n-\tvsi->rx_buf_len = ICE_RXBUF_2048;\n+\tif (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {\n+\t\tvsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;\n+\t\tvsi->rx_buf_len = ICE_RXBUF_2048;\n+#if (PAGE_SIZE < 8192)\n+\t} else if (vsi->netdev->mtu <= ETH_DATA_LEN) {\n+\t\tvsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;\n+\t\tvsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;\n+#endif\n+\t} else {\n+\t\tvsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;\n+#if (PAGE_SIZE < 8192)\n+\t\tvsi->rx_buf_len = ICE_RXBUF_3072;\n+#else\n+\t\tvsi->rx_buf_len = ICE_RXBUF_2048;\n+#endif\n+\t}\n }\n \n /**\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c\nindex acf834a7130a..63fe73c5097c 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.c\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c\n@@ -310,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)\n \t\t */\n \t\tdma_sync_single_range_for_cpu(dev, rx_buf->dma,\n \t\t\t\t\t rx_buf->page_offset,\n-\t\t\t\t\t ICE_RXBUF_2048, DMA_FROM_DEVICE);\n+\t\t\t\t\t rx_ring->rx_buf_len,\n+\t\t\t\t\t DMA_FROM_DEVICE);\n \n \t\t/* free resources associated with mapping */\n-\t\tdma_unmap_page_attrs(dev, 
rx_buf->dma, PAGE_SIZE,\n+\t\tdma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),\n \t\t\t\t DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);\n \t\t__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);\n \n@@ -529,21 +530,21 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)\n \t}\n \n \t/* alloc new page for storage */\n-\tpage = alloc_page(GFP_ATOMIC | __GFP_NOWARN);\n+\tpage = dev_alloc_pages(ice_rx_pg_order(rx_ring));\n \tif (unlikely(!page)) {\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n \t}\n \n \t/* map page for use */\n-\tdma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,\n+\tdma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),\n \t\t\t\t DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);\n \n \t/* if mapping failed free memory back to system since\n \t * there isn't much point in holding memory we can't use\n \t */\n \tif (dma_mapping_error(rx_ring->dev, dma)) {\n-\t\t__free_pages(page, 0);\n+\t\t__free_pages(page, ice_rx_pg_order(rx_ring));\n \t\trx_ring->rx_stats.alloc_page_failed++;\n \t\treturn false;\n \t}\n@@ -592,7 +593,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)\n \t\t/* sync the buffer for use by the device */\n \t\tdma_sync_single_range_for_device(rx_ring->dev, bi->dma,\n \t\t\t\t\t\t bi->page_offset,\n-\t\t\t\t\t\t ICE_RXBUF_2048,\n+\t\t\t\t\t\t rx_ring->rx_buf_len,\n \t\t\t\t\t\t DMA_FROM_DEVICE);\n \n \t\t/* Refresh the desc even if buffer_addrs didn't change\n@@ -663,9 +664,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)\n */\n static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)\n {\n-#if (PAGE_SIZE >= 8192)\n-\tunsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;\n-#endif\n \tunsigned int pagecnt_bias = rx_buf->pagecnt_bias;\n \tstruct page *page = rx_buf->page;\n \n@@ -678,7 +676,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)\n \tif (unlikely((page_count(page) - pagecnt_bias) > 1))\n 
\t\treturn false;\n #else\n-\tif (rx_buf->page_offset > last_offset)\n+#define ICE_LAST_OFFSET \\\n+\t(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)\n+\tif (rx_buf->page_offset > ICE_LAST_OFFSET)\n \t\treturn false;\n #endif /* PAGE_SIZE < 8192) */\n \n@@ -696,6 +696,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)\n \n /**\n * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag\n+ * @rx_ring: Rx descriptor ring to transact packets on\n * @rx_buf: buffer containing page to add\n * @skb: sk_buff to place the data into\n * @size: packet length from rx_desc\n@@ -705,13 +706,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)\n * The function will then update the page offset.\n */\n static void\n-ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,\n-\t\tunsigned int size)\n+ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,\n+\t\tstruct sk_buff *skb, unsigned int size)\n {\n #if (PAGE_SIZE >= 8192)\n \tunsigned int truesize = SKB_DATA_ALIGN(size);\n #else\n-\tunsigned int truesize = ICE_RXBUF_2048;\n+\tunsigned int truesize = ice_rx_pg_size(rx_ring) / 2;\n #endif\n \n \tif (!size)\n@@ -830,7 +831,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,\n #if (PAGE_SIZE >= 8192)\n \t\tunsigned int truesize = SKB_DATA_ALIGN(size);\n #else\n-\t\tunsigned int truesize = ICE_RXBUF_2048;\n+\t\tunsigned int truesize = ice_rx_pg_size(rx_ring) / 2;\n #endif\n \t\tskb_add_rx_frag(skb, 0, rx_buf->page,\n \t\t\t\trx_buf->page_offset + headlen, size, truesize);\n@@ -873,8 +874,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)\n \t\trx_ring->rx_stats.page_reuse_count++;\n \t} else {\n \t\t/* we are not reusing the buffer so unmap it */\n-\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,\n-\t\t\t\t DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);\n+\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,\n+\t\t\t\t ice_rx_pg_size(rx_ring), 
DMA_FROM_DEVICE,\n+\t\t\t\t ICE_RX_DMA_ATTR);\n \t\t__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);\n \t}\n \n@@ -1008,9 +1010,15 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)\n \t\trcu_read_unlock();\n \t\tif (xdp_res) {\n \t\t\tif (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {\n+\t\t\t\tunsigned int truesize;\n+\n+#if (PAGE_SIZE < 8192)\n+\t\t\t\ttruesize = ice_rx_pg_size(rx_ring) / 2;\n+#else\n+\t\t\t\ttruesize = SKB_DATA_ALIGN(size);\n+#endif\n \t\t\t\txdp_xmit |= xdp_res;\n-\t\t\t\tice_rx_buf_adjust_pg_offset(rx_buf,\n-\t\t\t\t\t\t\t ICE_RXBUF_2048);\n+\t\t\t\tice_rx_buf_adjust_pg_offset(rx_buf, truesize);\n \t\t\t} else {\n \t\t\t\trx_buf->pagecnt_bias++;\n \t\t\t}\n@@ -1023,7 +1031,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)\n \t\t}\n construct_skb:\n \t\tif (skb)\n-\t\t\tice_add_rx_frag(rx_buf, skb, size);\n+\t\t\tice_add_rx_frag(rx_ring, rx_buf, skb, size);\n \t\telse\n \t\t\tskb = ice_construct_skb(rx_ring, rx_buf, &xdp);\n \ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex d5d243b8e69f..6a6e3d2339ba 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -7,7 +7,9 @@\n #include \"ice_type.h\"\n \n #define ICE_DFLT_IRQ_WORK\t256\n+#define ICE_RXBUF_3072\t\t3072\n #define ICE_RXBUF_2048\t\t2048\n+#define ICE_RXBUF_1536\t\t1536\n #define ICE_MAX_CHAINED_RX_BUFS\t5\n #define ICE_MAX_BUF_TXD\t\t8\n #define ICE_MIN_TX_LEN\t\t17\n@@ -262,6 +264,17 @@ struct ice_ring_container {\n #define ice_for_each_ring(pos, head) \\\n \tfor (pos = (head).ring; pos; pos = pos->next)\n \n+static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)\n+{\n+#if (PAGE_SIZE < 8192)\n+\tif (ring->rx_buf_len > (PAGE_SIZE / 2))\n+\t\treturn 1;\n+#endif\n+\treturn 0;\n+}\n+\n+#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))\n+\n union ice_32b_rx_flex_desc;\n \n bool ice_alloc_rx_bufs(struct 
ice_ring *rxr, u16 cleaned_count);\n", "prefixes": [ "S30", "v3", "6/9" ] }