[net-next,2/7] net: aquantia: optimize rx performance by page reuse strategy
diff mbox series

Message ID 0acd9e5b04184815d446a965fe8920a877b5fe98.1553353914.git.igor.russkikh@aquantia.com
State Accepted
Delegated to: David Miller
Headers show
Series
  • net: aquantia: RX performance optimization patches
Related show

Commit Message

Igor Russkikh March 23, 2019, 3:23 p.m. UTC
We introduce an internal aq_rxpage wrapper over a regular page,
where an extra field is tracked: the rx buffer offset inside the allocated page.

This offset allows one page to be reused for multiple packets.
When needed (for example when processing large frames), the allocated
page order can be customized. This gives even greater page reuse
efficiency.

page_ref_count is used to track page users. If, during rx refill, the
underlying page still has users, we increase pg_off by the rx frame size;
thus the next part of the page is reused.

Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
---
 .../net/ethernet/aquantia/atlantic/aq_cfg.h   |   2 +
 .../net/ethernet/aquantia/atlantic/aq_nic.c   |   1 +
 .../net/ethernet/aquantia/atlantic/aq_nic.h   |   1 +
 .../net/ethernet/aquantia/atlantic/aq_ring.c  | 166 +++++++++++++-----
 .../net/ethernet/aquantia/atlantic/aq_ring.h  |  34 ++--
 .../net/ethernet/aquantia/atlantic/aq_vec.c   |   3 +
 .../aquantia/atlantic/hw_atl/hw_atl_a0.c      |   4 -
 .../aquantia/atlantic/hw_atl/hw_atl_b0.c      |   4 -
 8 files changed, 152 insertions(+), 63 deletions(-)

Patch
diff mbox series

diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index aba550770adf..80c16ab87771 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,8 @@ 
 
 #define AQ_CFG_RX_HDR_SIZE 256U
 
+#define AQ_CFG_RX_PAGEORDER 0U
+
 /* LRO */
 #define AQ_CFG_IS_LRO_DEF           1U
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ff83667410bd..059df86e8e37 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -73,6 +73,7 @@  void aq_nic_cfg_start(struct aq_nic_s *self)
 	cfg->tx_itr = aq_itr_tx;
 	cfg->rx_itr = aq_itr_rx;
 
+	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
 	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
 	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
 	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 8e34c1e49bf2..b1372430f62f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -31,6 +31,7 @@  struct aq_nic_cfg_s {
 	u32 itr;
 	u16 rx_itr;
 	u16 tx_itr;
+	u32 rxpageorder;
 	u32 num_rss_queues;
 	u32 mtu;
 	u32 flow_control;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 42163b5d1cac..1b258694144c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -12,10 +12,89 @@ 
 #include "aq_ring.h"
 #include "aq_nic.h"
 #include "aq_hw.h"
+#include "aq_hw_utils.h"
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
+static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
+{
+	unsigned int len = PAGE_SIZE << rxpage->order;
+
+	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+
+	/* Drop the ref for being in the ring. */
+	__free_pages(rxpage->page, rxpage->order);
+	rxpage->page = NULL;
+}
+
+static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
+			 struct device *dev)
+{
+	struct page *page;
+	dma_addr_t daddr;
+	int ret = -ENOMEM;
+
+	page = dev_alloc_pages(order);
+	if (unlikely(!page))
+		goto err_exit;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
+			     DMA_FROM_DEVICE);
+
+	if (unlikely(dma_mapping_error(dev, daddr)))
+		goto free_page;
+
+	rxpage->page = page;
+	rxpage->daddr = daddr;
+	rxpage->order = order;
+	rxpage->pg_off = 0;
+
+	return 0;
+
+free_page:
+	__free_pages(page, order);
+
+err_exit:
+	return ret;
+}
+
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
+			  int order)
+{
+	int ret;
+
+	if (rxbuf->rxdata.page) {
+		/* One means ring is the only user and can reuse */
+		if (page_ref_count(rxbuf->rxdata.page) > 1) {
+			/* Try reuse buffer */
+			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
+			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
+				(PAGE_SIZE << order)) {
+				self->stats.rx.pg_flips++;
+			} else {
+				/* Buffer exhausted. We have other users and
+				 * should release this page and realloc
+				 */
+				aq_free_rxpage(&rxbuf->rxdata,
+					       aq_nic_get_dev(self->aq_nic));
+				self->stats.rx.pg_losts++;
+			}
+		} else {
+			rxbuf->rxdata.pg_off = 0;
+			self->stats.rx.pg_reuses++;
+		}
+	}
+
+	if (!rxbuf->rxdata.page) {
+		ret = aq_get_rxpage(&rxbuf->rxdata, order,
+				    aq_nic_get_dev(self->aq_nic));
+		return ret;
+	}
+
+	return 0;
+}
+
 static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
 				       struct aq_nic_s *aq_nic)
 {
@@ -81,6 +160,11 @@  struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 	self->idx = idx;
 	self->size = aq_nic_cfg->rxds;
 	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+
+	if (aq_nic_cfg->rxpageorder > self->page_order)
+		self->page_order = aq_nic_cfg->rxpageorder;
 
 	self = aq_ring_alloc(self, aq_nic);
 	if (!self) {
@@ -213,10 +297,8 @@  int aq_ring_rx_clean(struct aq_ring_s *self,
 		unsigned int i = 0U;
 		u16 hdr_len;
 
-		if (buff->is_error) {
-			__free_pages(buff->page, 0);
+		if (buff->is_error)
 			continue;
-		}
 
 		if (buff->is_cleaned)
 			continue;
@@ -246,16 +328,22 @@  int aq_ring_rx_clean(struct aq_ring_s *self,
 			}
 		}
 
+		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+					      buff->rxdata.daddr,
+					      buff->rxdata.pg_off,
+					      buff->len, DMA_FROM_DEVICE);
+
 		/* for single fragment packets use build_skb() */
 		if (buff->is_eop &&
 		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
-			skb = build_skb(page_address(buff->page),
+			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
 					AQ_CFG_RX_FRAME_MAX);
 			if (unlikely(!skb)) {
 				err = -ENOMEM;
 				goto err_exit;
 			}
 			skb_put(skb, buff->len);
+			page_ref_inc(buff->rxdata.page);
 		} else {
 			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
 			if (unlikely(!skb)) {
@@ -265,34 +353,41 @@  int aq_ring_rx_clean(struct aq_ring_s *self,
 
 			hdr_len = buff->len;
 			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
-				hdr_len = eth_get_headlen(page_address(buff->page),
+				hdr_len = eth_get_headlen(aq_buf_vaddr(&buff->rxdata),
 							  AQ_CFG_RX_HDR_SIZE);
 
-			memcpy(__skb_put(skb, hdr_len), page_address(buff->page),
+			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
 			       ALIGN(hdr_len, sizeof(long)));
 
 			if (buff->len - hdr_len > 0) {
-				skb_add_rx_frag(skb, 0, buff->page,
-						hdr_len,
+				skb_add_rx_frag(skb, 0, buff->rxdata.page,
+						buff->rxdata.pg_off + hdr_len,
 						buff->len - hdr_len,
-						SKB_TRUESIZE(buff->len - hdr_len));
+						AQ_CFG_RX_FRAME_MAX);
+				page_ref_inc(buff->rxdata.page);
 			}
 
 			if (!buff->is_eop) {
-				for (i = 1U, next_ = buff->next,
-				     buff_ = &self->buff_ring[next_];
-				     true; next_ = buff_->next,
-				     buff_ = &self->buff_ring[next_], ++i) {
-					skb_add_rx_frag(skb, i,
-							buff_->page, 0,
+				buff_ = buff;
+				i = 1U;
+				do {
+					next_ = buff_->next,
+					buff_ = &self->buff_ring[next_];
+
+					dma_sync_single_range_for_cpu(
+							aq_nic_get_dev(self->aq_nic),
+							buff_->rxdata.daddr,
+							buff_->rxdata.pg_off,
+							buff_->len,
+							DMA_FROM_DEVICE);
+					skb_add_rx_frag(skb, i++,
+							buff_->rxdata.page,
+							buff_->rxdata.pg_off,
 							buff_->len,
-							SKB_TRUESIZE(buff->len -
-							ETH_HLEN));
+							AQ_CFG_RX_FRAME_MAX);
+					page_ref_inc(buff_->rxdata.page);
 					buff_->is_cleaned = 1;
-
-					if (buff_->is_eop)
-						break;
-				}
+				} while (!buff_->is_eop);
 			}
 		}
 
@@ -318,8 +413,7 @@  int aq_ring_rx_clean(struct aq_ring_s *self,
 
 int aq_ring_rx_fill(struct aq_ring_s *self)
 {
-	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
-		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+	unsigned int page_order = self->page_order;
 	struct aq_ring_buff_s *buff = NULL;
 	int err = 0;
 	int i = 0;
@@ -331,30 +425,15 @@  int aq_ring_rx_fill(struct aq_ring_s *self)
 		buff->flags = 0U;
 		buff->len = AQ_CFG_RX_FRAME_MAX;
 
-		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
-		if (!buff->page) {
-			err = -ENOMEM;
+		err = aq_get_rxpages(self, buff, page_order);
+		if (err)
 			goto err_exit;
-		}
-
-		buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
-					buff->page, 0,
-					AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
-		if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
-			err = -ENOMEM;
-			goto err_exit;
-		}
 
+		buff->pa = aq_buf_daddr(&buff->rxdata);
 		buff = NULL;
 	}
 
 err_exit:
-	if (err < 0) {
-		if (buff && buff->page)
-			__free_pages(buff->page, 0);
-	}
-
 	return err;
 }
 
@@ -367,10 +446,7 @@  void aq_ring_rx_deinit(struct aq_ring_s *self)
 		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
 		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 
-		dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
-			       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
-		__free_pages(buff->page, 0);
+		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
 	}
 
 err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index ac1329f4051d..cfffc301e746 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -17,6 +17,13 @@ 
 struct page;
 struct aq_nic_cfg_s;
 
+struct aq_rxpage {
+	struct page *page;
+	dma_addr_t daddr;
+	unsigned int order;
+	unsigned int pg_off;
+};
+
 /*           TxC       SOP        DX         EOP
  *         +----------+----------+----------+-----------
  *   8bytes|len l3,l4 | pa       | pa       | pa
@@ -31,28 +38,21 @@  struct aq_nic_cfg_s;
  */
 struct __packed aq_ring_buff_s {
 	union {
+		/* RX/TX */
+		dma_addr_t pa;
 		/* RX */
 		struct {
 			u32 rss_hash;
 			u16 next;
 			u8 is_hash_l4;
 			u8 rsvd1;
-			struct page *page;
+			struct aq_rxpage rxdata;
 		};
 		/* EOP */
 		struct {
 			dma_addr_t pa_eop;
 			struct sk_buff *skb;
 		};
-		/* DX */
-		struct {
-			dma_addr_t pa;
-		};
-		/* SOP */
-		struct {
-			dma_addr_t pa_sop;
-			u32 len_pkt_sop;
-		};
 		/* TxC */
 		struct {
 			u32 mss;
@@ -91,6 +91,9 @@  struct aq_ring_stats_rx_s {
 	u64 bytes;
 	u64 lro_packets;
 	u64 jumbo_packets;
+	u64 pg_losts;
+	u64 pg_flips;
+	u64 pg_reuses;
 };
 
 struct aq_ring_stats_tx_s {
@@ -116,6 +119,7 @@  struct aq_ring_s {
 	unsigned int size;	/* descriptors number */
 	unsigned int dx_size;	/* TX or RX descriptor size,  */
 				/* stored here for fater math */
+	unsigned int page_order;
 	union aq_ring_stats_s stats;
 	dma_addr_t dx_ring_pa;
 };
@@ -126,6 +130,16 @@  struct aq_ring_param_s {
 	cpumask_t affinity_mask;
 };
 
+static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
+{
+	return page_to_virt(rxpage->page) + rxpage->pg_off;
+}
+
+static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
+{
+	return rxpage->daddr + rxpage->pg_off;
+}
+
 static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
 					   unsigned int dx)
 {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d335c334fa56..a2e4ca1782ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -353,6 +353,9 @@  void aq_vec_add_stats(struct aq_vec_s *self,
 		stats_rx->errors += rx->errors;
 		stats_rx->jumbo_packets += rx->jumbo_packets;
 		stats_rx->lro_packets += rx->lro_packets;
+		stats_rx->pg_losts += rx->pg_losts;
+		stats_rx->pg_flips += rx->pg_flips;
+		stats_rx->pg_reuses += rx->pg_reuses;
 
 		stats_tx->packets += tx->packets;
 		stats_tx->bytes += tx->bytes;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f6f8338153a2..65ffaa7ad69e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -619,8 +619,6 @@  static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
 static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 					struct aq_ring_s *ring)
 {
-	struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
 	for (; ring->hw_head != ring->sw_tail;
 		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
 		struct aq_ring_buff_s *buff = NULL;
@@ -687,8 +685,6 @@  static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 		is_err &= ~0x18U;
 		is_err &= ~0x04U;
 
-		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
 		if (is_err || rxd_wb->type & 0x1000U) {
 			/* status error or DMA error */
 			buff->is_error = 1U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b31dba1b1a55..f4e895906b1a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -654,8 +654,6 @@  static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
 static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 					struct aq_ring_s *ring)
 {
-	struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
 	for (; ring->hw_head != ring->sw_tail;
 		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
 		struct aq_ring_buff_s *buff = NULL;
@@ -697,8 +695,6 @@  static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 			buff->is_cso_err = 0U;
 		}
 
-		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
 		if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
 			/* MAC error or DMA error */
 			buff->is_error = 1U;