[2/4] xen/netback: Split one page pool into two (tx/rx) page pools.

Message ID 1352963089-599-1-git-send-email-annie.li@oracle.com
State Changes Requested, archived
Delegated to: David Miller

Commit Message

Annie.li Nov. 15, 2012, 7:04 a.m. UTC
For the tx path, this implementation simplifies the work of looking up
a grant page in the page pool by its grant reference.

Signed-off-by: Annie Li <annie.li@oracle.com>
---
 drivers/net/xen-netback/common.h    |   14 ++++++++++----
 drivers/net/xen-netback/interface.c |   12 ++++++++----
 drivers/net/xen-netback/netback.c   |   15 +++++++++------
 3 files changed, 27 insertions(+), 14 deletions(-)
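
To make the lookup in question concrete, here is a minimal sketch of how such a per-path linear search might look. The helper name get_persistent_entry() and the persistent_entry fields are assumptions for illustration only; they are not taken from this series.

/*
 * Illustrative sketch only: the helper name and the persistent_entry
 * fields below are assumed, not taken from this series.  Once the pool
 * is split, a tx lookup scans at most XEN_NETIF_TX_RING_SIZE entries
 * and an rx lookup at most 2*XEN_NETIF_RX_RING_SIZE entries.
 */
struct persistent_entry {
	grant_ref_t gref;	/* grant reference cached by this entry */
	struct page *page;	/* page that stays granted/mapped */
};

static struct persistent_entry *
get_persistent_entry(struct persistent_entry **pool, unsigned int count,
		     grant_ref_t gref)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (pool[i]->gref == gref)
			return pool[i];

	return NULL;	/* gref not cached in this pool yet */
}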

Comments

Ian Campbell Nov. 15, 2012, 9:15 a.m. UTC | #1
On Thu, 2012-11-15 at 07:04 +0000, Annie Li wrote:
> For the tx path, this implementation simplifies the work of looking up
> a grant page in the page pool by its grant reference.

It's still a linear search though, and it doesn't look much simpler to
me:
  	for (i = 0; i < count; i++) {
		if (tx_pool)
			vif = netbk->gnttab_tx_vif[i];
		else
			vif = netbk->gnttab_rx_vif[i];

		pers_entry = vif->persistent_gnt;
		gnt_count = &vif->persistent_gntcnt;
		gnt_total = MAXIMUM_OUTSTANDING_BLOCK_REQS;
becomes:
	for (i = 0; i < count; i++) {
		if (tx_pool) {
			vif = netbk->gnttab_tx_vif[i];
			gnt_count = &vif->persistent_tx_gntcnt;
			gnt_total = XEN_NETIF_TX_RING_SIZE;
			pers_entry = vif->persistent_tx_gnt;
		} else {
			vif = netbk->gnttab_rx_vif[i];
			gnt_count = &vif->persistent_rx_gntcnt;
			gnt_total = 2*XEN_NETIF_RX_RING_SIZE;
			pers_entry = vif->persistent_rx_gnt;
		}

> @@ -111,8 +109,16 @@ struct xenvif {
>  
>  	wait_queue_head_t waiting_to_free;
>  
> -	struct persistent_entry *persistent_gnt[MAXIMUM_OUTSTANDING_BLOCK_REQS];
> -	unsigned int persistent_gntcnt;
> +	struct persistent_entry *persistent_tx_gnt[XEN_NETIF_TX_RING_SIZE];
> +
> +	/*
> +	 * 2*XEN_NETIF_RX_RING_SIZE is for the case of each head/fragment page

Shouldn't that have been incorporated into MAXIMUM_OUTSTANDING_BLOCK_REQS
(sic) too?

> +	 * using 2 copy operations.
> +	 */
> +	struct persistent_entry *persistent_rx_gnt[2*XEN_NETIF_RX_RING_SIZE];

What is the per-vif memory overhead after this change?

Ian.

Annie.li Nov. 16, 2012, 3:10 a.m. UTC | #2
On 2012-11-15 17:15, Ian Campbell wrote:
> On Thu, 2012-11-15 at 07:04 +0000, Annie Li wrote:
>> For the tx path, this implementation simplifies the work of looking up
>> a grant page in the page pool by its grant reference.
> It's still a linear search though, and it doesn't look much simpler to
> me:
>    	for (i = 0; i < count; i++) {
> 		if (tx_pool)
> 			vif = netbk->gnttab_tx_vif[i];
> 		else
> 			vif = netbk->gnttab_rx_vif[i];
>
> 		pers_entry = vif->persistent_gnt;
> 		gnt_count = &vif->persistent_gntcnt;
> 		gnt_total = MAXIMUM_OUTSTANDING_BLOCK_REQS;
> becomes:
> 	for (i = 0; i < count; i++) {
> 		if (tx_pool) {
> 			vif = netbk->gnttab_tx_vif[i];
> 			gnt_count = &vif->persistent_tx_gntcnt;
> 			gnt_total = XEN_NETIF_TX_RING_SIZE;
> 			pers_entry = vif->persistent_tx_gnt;
> 		} else {
> 			vif = netbk->gnttab_rx_vif[i];
> 			gnt_count = &vif->persistent_rx_gntcnt;
> 			gnt_total = 2*XEN_NETIF_RX_RING_SIZE;
> 			pers_entry = vif->persistent_rx_gnt;
> 		}

Yes, the code itself is not simpler. If we make netback per-VIF based,
this code will disappear.
The simplification is on the tx path: the maximum search index drops from
MAXIMUM_OUTSTANDING_BLOCK_REQS (512 here) to XEN_NETIF_TX_RING_SIZE (256
here), so this change saves some time when looking up the grant page for
a specific grant reference.
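
To illustrate that bound, here is a hypothetical tx-path call site, reusing the illustrative get_persistent_entry() sketched under the commit message above; txreq stands for the tx request being processed and is not a name taken from this patch.

	/* Illustrative only: the scan is bounded by the tx pool, i.e. at
	 * most XEN_NETIF_TX_RING_SIZE entries, rather than by the combined
	 * MAXIMUM_OUTSTANDING_BLOCK_REQS-sized pool. */
	struct persistent_entry *entry;

	entry = get_persistent_entry(vif->persistent_tx_gnt,
				     vif->persistent_tx_gntcnt,
				     txreq.gref);
	if (!entry) {
		/* gref not seen before: the real code would map the grant,
		 * allocate a new entry and bump persistent_tx_gntcnt. */
	}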

>
>> @@ -111,8 +109,16 @@ struct xenvif {
>>
>>   	wait_queue_head_t waiting_to_free;
>>
>> -	struct persistent_entry *persistent_gnt[MAXIMUM_OUTSTANDING_BLOCK_REQS];
>> -	unsigned int persistent_gntcnt;
>> +	struct persistent_entry *persistent_tx_gnt[XEN_NETIF_TX_RING_SIZE];
>> +
>> +	/*
>> +	 * 2*XEN_NETIF_RX_RING_SIZE is for the case of each head/fragment page
> Shouldn't that have been incorporated into MAXIMUM_OUTSTANDING_BLOCK_REQS
> (sic) too?

Yes, the total value is the same as MAXIMUM_OUTSTANDING_BLOCK_REQS. But
writing it as 2*XEN_NETIF_RX_RING_SIZE makes it clear that the array is
only used by the rx path, and it is sized just like the other rx elements
of the netback structure, such as grant_copy_op, meta, etc.


>> +	 * using 2 copy operations.
>> +	 */
>> +	struct persistent_entry *persistent_rx_gnt[2*XEN_NETIF_RX_RING_SIZE];
> What is the per-vif memory overhead after this change?

The per-vif memory overhead is as follows:
for the tx path, it is about XEN_NETIF_TX_RING_SIZE*PAGE_SIZE (256 pages
here);
for the rx path, it is about 2*XEN_NETIF_RX_RING_SIZE*PAGE_SIZE (512 pages
here).

I can add a comment about this here.
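
As a rough, back-of-the-envelope check of those figures (assuming 4 KiB pages; the constants below are illustrative and not part of the patch):

/* Illustrative per-vif overhead estimate, assuming 4 KiB pages and
 * 256-entry rings; not part of the patch. */
#define EST_PAGE_SIZE		4096UL
#define EST_TX_POOL_PAGES	256UL		/* XEN_NETIF_TX_RING_SIZE */
#define EST_RX_POOL_PAGES	(2 * 256UL)	/* 2*XEN_NETIF_RX_RING_SIZE */

#define EST_TX_POOL_BYTES	(EST_TX_POOL_PAGES * EST_PAGE_SIZE)	/* 1 MiB */
#define EST_RX_POOL_BYTES	(EST_RX_POOL_PAGES * EST_PAGE_SIZE)	/* 2 MiB */
/* Total: roughly 3 MiB of persistently granted pages per vif, plus about
 * 6 KiB for the two pointer arrays on a 64-bit kernel (768 * 8 bytes). */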

Thanks
Annie
>
> Ian.
>

Patch

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a85cac6..02c8573 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -47,8 +47,6 @@ 
 
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
 #define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-			(XEN_NETIF_TX_RING_SIZE + XEN_NETIF_RX_RING_SIZE)
 
 struct xen_netbk;
 
@@ -111,8 +109,16 @@  struct xenvif {
 
 	wait_queue_head_t waiting_to_free;
 
-	struct persistent_entry *persistent_gnt[MAXIMUM_OUTSTANDING_BLOCK_REQS];
-	unsigned int persistent_gntcnt;
+	struct persistent_entry *persistent_tx_gnt[XEN_NETIF_TX_RING_SIZE];
+
+	/*
+	 * 2*XEN_NETIF_RX_RING_SIZE is for the case of each head/fragment page
+	 * using 2 copy operations.
+	 */
+	struct persistent_entry *persistent_rx_gnt[2*XEN_NETIF_RX_RING_SIZE];
+
+	unsigned int persistent_tx_gntcnt;
+	unsigned int persistent_rx_gntcnt;
 };
 
 static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 226d159..ecbe116 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -300,7 +300,8 @@  struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 		return ERR_PTR(err);
 	}
 
-	vif->persistent_gntcnt = 0;
+	vif->persistent_tx_gntcnt = 0;
+	vif->persistent_rx_gntcnt = 0;
 
 	netdev_dbg(dev, "Successfully created xenvif\n");
 	return vif;
@@ -385,9 +386,12 @@  void xenvif_disconnect(struct xenvif *vif)
 	unregister_netdev(vif->dev);
 
 	xen_netbk_unmap_frontend_rings(vif);
-	if (vif->persistent_grant)
-		xenvif_free_grants(vif->persistent_gnt,
-				   vif->persistent_gntcnt);
+	if (vif->persistent_grant) {
+		xenvif_free_grants(vif->persistent_tx_gnt,
+				   vif->persistent_tx_gntcnt);
+		xenvif_free_grants(vif->persistent_rx_gnt,
+				   vif->persistent_rx_gntcnt);
+	}
 
 	free_netdev(vif->dev);
 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a26d3fc..ec59c73 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -208,14 +208,17 @@  grant_memory_copy_op(unsigned int cmd, void *vuop, unsigned int count,
 
 	BUG_ON(cmd != GNTTABOP_copy);
 	for (i = 0; i < count; i++) {
-		if (tx_pool)
+		if (tx_pool) {
 			vif = netbk->gnttab_tx_vif[i];
-		else
+			gnt_count = &vif->persistent_tx_gntcnt;
+			gnt_total = XEN_NETIF_TX_RING_SIZE;
+			pers_entry = vif->persistent_tx_gnt;
+		} else {
 			vif = netbk->gnttab_rx_vif[i];
-
-		pers_entry = vif->persistent_gnt;
-		gnt_count = &vif->persistent_gntcnt;
-		gnt_total = MAXIMUM_OUTSTANDING_BLOCK_REQS;
+			gnt_count = &vif->persistent_rx_gntcnt;
+			gnt_total = 2*XEN_NETIF_RX_RING_SIZE;
+			pers_entry = vif->persistent_rx_gnt;
+		}
 
 		if (vif->persistent_grant) {
 			void *saddr, *daddr;