| Message ID | 1321877641-25747-2-git-send-email-apw@canonical.com |
|---|---|
| State | New |
On 21.11.2011 13:14, Andy Whitcroft wrote:
> From: Eric Dumazet <eric.dumazet@gmail.com>
>
> [ Upstream commit 66944e1c5797562cebe2d1857d46dff60bf9a69e ]
>
> On 64bit arches, we use 752 bytes of stack when cleanup_once() is called
> from inet_getpeer().
>
> Lets share the avl stack to save ~376 bytes.
>
> Before patch :
>
> 0x000006c3 unlink_from_pool [inetpeer.o]:     376
> 0x00000721 unlink_from_pool [inetpeer.o]:     376
> 0x00000cb1 inet_getpeer [inetpeer.o]:         376
> 0x00000e6d inet_getpeer [inetpeer.o]:         376
> 0x0004 inet_initpeers [inetpeer.o]:           112
>
>    text    data     bss     dec     hex filename
>    5320     432      21    5773    168d net/ipv4/inetpeer.o
>
> After patch :
>
> objdump -d net/ipv4/inetpeer.o | scripts/checkstack.pl
> 0x00000c11 inet_getpeer [inetpeer.o]:         376
> 0x00000dcd inet_getpeer [inetpeer.o]:         376
> 0x00000ab9 peer_check_expire [inetpeer.o]:    328
> 0x00000b7f peer_check_expire [inetpeer.o]:    328
> 0x0004 inet_initpeers [inetpeer.o]:           112
>
>    text    data     bss     dec     hex filename
>    5163     432      21    5616    15f0 net/ipv4/inetpeer.o
>
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> Cc: Scot Doyle <lkml@scotdoyle.com>
> Cc: Stephen Hemminger <shemminger@vyatta.com>
> Cc: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
> Reviewed-by: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
> Signed-off-by: David S. Miller <davem@davemloft.net>
> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
> Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
>
> CVE-2011-4087
> Signed-off-by: Andy Whitcroft <apw@canonical.com>
> ---
>  net/ipv4/inetpeer.c |   13 +++++++------
>  1 files changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
> index f231d1d..824e5f1 100644
> --- a/net/ipv4/inetpeer.c
> +++ b/net/ipv4/inetpeer.c
> @@ -367,7 +367,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
>  }
>
>  /* May be called with local BH enabled. */
> -static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
> +static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
> +			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
>  {
>  	int do_free;
>
> @@ -381,7 +382,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
>  	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
>  	 */
>  	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
> -		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
>  		struct inet_peer __rcu ***stackptr, ***delp;
>  		if (lookup(&p->daddr, stack, base) != p)
>  			BUG();
> @@ -436,7 +436,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
>  }
>
>  /* May be called with local BH enabled. */
> -static int cleanup_once(unsigned long ttl)
> +static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
>  {
>  	struct inet_peer *p = NULL;
>
> @@ -468,7 +468,7 @@ static int cleanup_once(unsigned long ttl)
>  		 * happen because of entry limits in route cache. */
>  		return -1;
>
> -	unlink_from_pool(p, peer_to_base(p));
> +	unlink_from_pool(p, peer_to_base(p), stack);
>  	return 0;
>  }
>
> @@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
>
>  	if (base->total >= inet_peer_threshold)
>  		/* Remove one less-recently-used entry. */
> -		cleanup_once(0);
> +		cleanup_once(0, stack);
>
>  	return p;
>  }
> @@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
>  {
>  	unsigned long now = jiffies;
>  	int ttl, total;
> +	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
>
>  	total = compute_total();
>  	if (total >= inet_peer_threshold)
> @@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
>  		ttl = inet_peer_maxttl
>  				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
>  					total / inet_peer_threshold * HZ;
> -	while (!cleanup_once(ttl)) {
> +	while (!cleanup_once(ttl, stack)) {
>  		if (jiffies != now)
>  			break;
>  	}

Not really sure how the stack saving helps, but it matches the upstream change (while the upstream code already looks completely different, ugh). And it seems safe, as inet_getpeer() uses its own local array first and the call to unlink_from_pool() is the last thing cleanup_once() does...
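For what it's worth, the shape of the change is easier to see outside the kernel. Below is a minimal userspace sketch of the same pattern, assuming nothing beyond standard C; the names get_entry() and evict_one() are invented stand-ins for inet_getpeer()/cleanup_once() and unlink_from_pool(). The point is that the outermost caller declares the walk array once and lends it to a callee that only runs after the caller's own walk has finished:

```c
/* Minimal sketch of the shared-scratch-array pattern, NOT the kernel
 * code: one PEER_MAXDEPTH-sized array lives on the stack for the whole
 * call chain instead of one per frame. Names are invented. */
#include <stddef.h>

#define PEER_MAXDEPTH 40

/* Callee reuses the caller's scratch array for its own tree walk;
 * this mirrors unlink_from_pool() taking the stack as a parameter. */
static void evict_one(void **scratch[PEER_MAXDEPTH])
{
	scratch[0] = NULL;	/* ...an AVL lookup would overwrite scratch[]... */
}

/* Mirrors inet_getpeer(): it finishes its own walk before evicting,
 * so handing the same array down cannot clobber live data -- the
 * safety argument made in the reply above. */
static void get_entry(void)
{
	void **scratch[PEER_MAXDEPTH];	/* the only copy on the stack */

	scratch[0] = NULL;	/* ...our own lookup uses scratch[] first... */
	evict_one(scratch);	/* done with our walk; safe to lend it out */
}

int main(void)
{
	get_entry();
	return 0;
}
```

The trade-off is an implicit contract: the caller's array contents must be dead by the time the callee scribbles over them, which is exactly what the last-call placement of unlink_from_pool() in cleanup_once() guarantees.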
```diff
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f231d1d..824e5f1 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -367,7 +367,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
 }
 
 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	int do_free;
 
@@ -381,7 +382,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
 	 */
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 		struct inet_peer __rcu ***stackptr, ***delp;
 		if (lookup(&p->daddr, stack, base) != p)
 			BUG();
@@ -436,7 +436,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
 }
 
 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	struct inet_peer *p = NULL;
 
@@ -468,7 +468,7 @@ static int cleanup_once(unsigned long ttl)
 		 * happen because of entry limits in route cache. */
 		return -1;
 
-	unlink_from_pool(p, peer_to_base(p));
+	unlink_from_pool(p, peer_to_base(p), stack);
 	return 0;
 }
 
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 
 	if (base->total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
-		cleanup_once(0);
+		cleanup_once(0, stack);
 
 	return p;
 }
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
 {
 	unsigned long now = jiffies;
 	int ttl, total;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 
 	total = compute_total();
 	if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					total / inet_peer_threshold * HZ;
-	while (!cleanup_once(ttl)) {
+	while (!cleanup_once(ttl, stack)) {
 		if (jiffies != now)
 			break;
 	}
```
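As for the numbers: inetpeer.c of this era defines PEER_MAXDEPTH as 40 (an assumption worth checking against the tree at hand), so on a 64-bit arch a single walk array alone is 40 x 8 = 320 bytes. That accounts for the bulk of each ~376-byte frame checkstack reports, and for the 752 bytes quoted above when both inet_getpeer() and unlink_from_pool() in the same call chain each held their own copy. A trivial userspace check of that arithmetic:

```c
/* Sanity check of the stack arithmetic above; assumes LP64 (8-byte
 * pointers) and PEER_MAXDEPTH == 40 as in inetpeer.c of this era. */
#include <stdio.h>

#define PEER_MAXDEPTH 40

int main(void)
{
	void **walk[PEER_MAXDEPTH];	/* stand-in for struct inet_peer __rcu **stack[] */

	/* Prints 320 on LP64: most of the ~376-byte frame per copy. */
	printf("%zu bytes per walk array\n", sizeof(walk));
	return 0;
}
```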