
[net-next] Re: rib_trie / Fix inflate_threshold_root. Now=15 size=11 bits

Message ID 20090714183308.GB3090@ami.dom.local
State Accepted, archived
Delegated to: David Miller

Commit Message

Jarek Poplawski July 14, 2009, 6:33 p.m. UTC
On Sun, Jul 05, 2009 at 02:32:32PM -0700, Paul E. McKenney wrote:
> On Sun, Jul 05, 2009 at 07:32:08PM +0200, Jarek Poplawski wrote:
> > On Sun, Jul 05, 2009 at 06:20:03PM +0200, Jarek Poplawski wrote:
> > > On Sun, Jul 05, 2009 at 02:30:03AM +0200, Paweł Staszewski wrote:
> > > > Oh
> > > >
> > > > I forgot - please, Jarek, give me the patch with sync rcu and I will
> > > > test it on a preempt kernel
> > > 
> > > A non-preempt kernel probably needs something like this more, but
> > > comparing is always interesting. This patch is based on Paul's
> > > suggestion (I hope).
> > 
> > Hold on ;-) Here is something even better... Syncing after 128 pages
> > might still be too slow, so here is a higher initial value, 1000, and
> > you can change it while testing via:
> > 
> > /sys/module/fib_trie/parameters/sync_pages
> > 
> > It would be interesting to find the lowest acceptable value.
> 
> Looks like a promising approach to me!
> 
> 							Thanx, Paul

Below is a simpler version of this patch, without the sysfs parameter.
(I left the previous version quoted for comparison.) Thanks.

> > Jarek P.
> > ---> (synchronize take 8; apply on top of the 2.6.29.x with the last
> >  	all-in-one patch, or net-2.6)
> > 
> >  net/ipv4/fib_trie.c |   12 ++++++++++++
> >  1 files changed, 12 insertions(+), 0 deletions(-)
> > 
> > diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
> > index 00a54b2..decc8d0 100644
> > --- a/net/ipv4/fib_trie.c
> > +++ b/net/ipv4/fib_trie.c
> > @@ -71,6 +71,7 @@
> >  #include <linux/netlink.h>
> >  #include <linux/init.h>
> >  #include <linux/list.h>
> > +#include <linux/moduleparam.h>
> >  #include <net/net_namespace.h>
> >  #include <net/ip.h>
> >  #include <net/protocol.h>
> > @@ -164,6 +165,10 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
> >  static struct tnode *halve(struct trie *t, struct tnode *tn);
> >  /* tnodes to free after resize(); protected by RTNL */
> >  static struct tnode *tnode_free_head;
> > +static size_t tnode_free_size;
> > +
> > +static int sync_pages __read_mostly = 1000;
> > +module_param(sync_pages, int, 0640);
> > 
> >  static struct kmem_cache *fn_alias_kmem __read_mostly;
> >  static struct kmem_cache *trie_leaf_kmem __read_mostly;
> > @@ -393,6 +398,8 @@ static void tnode_free_safe(struct tnode *tn)
> >  	BUG_ON(IS_LEAF(tn));
> >  	tn->tnode_free = tnode_free_head;
> >  	tnode_free_head = tn;
> > +	tnode_free_size += sizeof(struct tnode) +
> > +			   (sizeof(struct node *) << tn->bits);
> >  }
> > 
> >  static void tnode_free_flush(void)
> > @@ -404,6 +411,11 @@ static void tnode_free_flush(void)
> >  		tn->tnode_free = NULL;
> >  		tnode_free(tn);
> >  	}
> > +
> > +	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
> > +		tnode_free_size = 0;
> > +		synchronize_rcu();
> > +	}
> >  }
> > 
> >  static struct leaf *leaf_new(void)
> > --

------------------------>
ipv4: Use synchronize_rcu() during trie_rebalance()

During trie_rebalance() we free memory after resizing with call_rcu(),
but large updates, especially with PREEMPT_NONE configs, can cause
memory pressure, so this patch calls synchronize_rcu() in
tnode_free_flush() whenever sync_pages pages' worth of tnodes has been
queued, to guarantee that the freeing really takes place (especially
before resizing the root node).

The value of sync_pages = 128 is based on Pawel Staszewski's tests and
is the lowest value that doesn't hinder update times. (For testing
purposes there was a sysfs module parameter to change it on demand, but
it has been removed until we're sure it's really useful.)

The patch is based on suggestions by: Paul E. McKenney
<paulmck@linux.vnet.ibm.com>

Reported-by: Pawel Staszewski <pstaszewski@itcare.pl>
Tested-by: Pawel Staszewski <pstaszewski@itcare.pl>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
---

 net/ipv4/fib_trie.c |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)
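
As an illustration of the batching described above, here is a minimal,
stand-alone user-space sketch of the idea: keep a running total of the memory
queued for deferred freeing and force a full synchronization once the backlog
crosses a page-based threshold. This is not the kernel code; all names below
are invented for illustration, and wait_for_grace_period() merely stands in
for synchronize_rcu().

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096			/* assume 4 KiB pages */
static const int sync_pages = 128;	/* same threshold as in the patch */

static size_t pending_bytes;		/* memory queued for deferred freeing */

/* Stand-in for synchronize_rcu(), which blocks until a grace period has
 * elapsed and everything queued with call_rcu() may be reclaimed; here it
 * only reports the batch. */
static void wait_for_grace_period(size_t reclaimable)
{
	printf("grace period: %zu bytes become reclaimable\n", reclaimable);
}

/* Account one object handed over for deferred (RCU-style) freeing. */
static void queue_deferred_free(size_t obj_size)
{
	pending_bytes += obj_size;
}

/* Called after each rebalance step: once sync_pages worth of pages
 * (128 * 4096 = 512 KiB here) is pending, wait for a grace period so
 * the backlog cannot grow without bound. */
static void flush_deferred_frees(void)
{
	if (pending_bytes >= (size_t)PAGE_SIZE * sync_pages) {
		size_t batch = pending_bytes;

		pending_bytes = 0;
		wait_for_grace_period(batch);
	}
}

int main(void)
{
	/* Simulate a large routing-table update freeing many tnodes. */
	for (int i = 0; i < 200000; i++) {
		queue_deferred_free(64 + 8 * (i % 128));
		flush_deferred_frees();
	}
	return 0;
}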


Comments

David Miller July 20, 2009, 2:41 p.m. UTC | #1
From: Jarek Poplawski <jarkao2@gmail.com>
Date: Tue, 14 Jul 2009 20:33:08 +0200

> ipv4: Use synchronize_rcu() during trie_rebalance()
> 
> During trie_rebalance() we free memory after resizing with call_rcu(),
> but large updates, especially with PREEMPT_NONE configs, can cause
> memory pressure, so this patch calls synchronize_rcu() in
> tnode_free_flush() whenever sync_pages pages' worth of tnodes has been
> queued, to guarantee that the freeing really takes place (especially
> before resizing the root node).
> 
> The value of sync_pages = 128 is based on Pawel Staszewski's tests and
> is the lowest value that doesn't hinder update times. (For testing
> purposes there was a sysfs module parameter to change it on demand, but
> it has been removed until we're sure it's really useful.)
> 
> The patch is based on suggestions by: Paul E. McKenney
> <paulmck@linux.vnet.ibm.com>
> 
> Reported-by: Pawel Staszewski <pstaszewski@itcare.pl>
> Tested-by: Pawel Staszewski <pstaszewski@itcare.pl>
> Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>

Applied.

Patch

diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 63c2fa7..58ba9f4 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -164,6 +164,14 @@  static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
 static struct tnode *tnode_free_head;
+static size_t tnode_free_size;
+
+/*
+ * synchronize_rcu after call_rcu for that many pages; it should be especially
+ * useful before resizing the root node with PREEMPT_NONE configs; the value was
+ * obtained experimentally, aiming to avoid visible slowdown.
+ */
+static const int sync_pages = 128;
 
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -393,6 +401,8 @@  static void tnode_free_safe(struct tnode *tn)
 	BUG_ON(IS_LEAF(tn));
 	tn->tnode_free = tnode_free_head;
 	tnode_free_head = tn;
+	tnode_free_size += sizeof(struct tnode) +
+			   (sizeof(struct node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -404,6 +414,11 @@  static void tnode_free_flush(void)
 		tn->tnode_free = NULL;
 		tnode_free(tn);
 	}
+
+	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+		tnode_free_size = 0;
+		synchronize_rcu();
+	}
 }
 
 static struct leaf *leaf_new(void)
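
As a rough back-of-the-envelope check of the numbers involved (illustrative
only; it assumes a 64-bit build, 4 KiB pages, and a ~40-byte struct tnode
header, all of which vary with the kernel config):

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	const unsigned long sync_pages = 128;	/* threshold from the patch */
	const unsigned long tnode_header = 40;	/* rough sizeof(struct tnode) */
	const unsigned long ptr_size = 8;	/* sizeof(struct node *) on 64 bit */
	const unsigned long bits = 5;		/* example tnode with 32 children */

	/* What tnode_free_safe() adds to tnode_free_size per queued tnode. */
	unsigned long per_tnode = tnode_header + (ptr_size << bits);
	/* The point at which tnode_free_flush() calls synchronize_rcu(). */
	unsigned long threshold = page_size * sync_pages;

	printf("per-tnode charge : %lu bytes\n", per_tnode);		/* ~296 */
	printf("flush threshold  : %lu bytes (%lu KiB)\n",
	       threshold, threshold / 1024);				/* 512 KiB */
	printf("tnodes per flush : ~%lu\n", threshold / per_tnode);	/* ~1700 */
	return 0;
}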