Message ID: 20110308.145954.59682618.davem@davemloft.net
State: Accepted, archived
Delegated to: David Miller
On Tuesday, 8 March 2011 at 14:59 -0800, David Miller wrote:
> If modifications on other cpus are ok, then modifications to
> the tree during lookup done by the local cpu are ok too.
>
> Signed-off-by: David S. Miller <davem@davemloft.net>
> ---
>  net/ipv4/inetpeer.c |   18 +++++++++---------
>  1 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
> index f604ffd..6442c35 100644
> --- a/net/ipv4/inetpeer.c
> +++ b/net/ipv4/inetpeer.c
> @@ -206,16 +206,16 @@ static int addr_compare(const struct inetpeer_addr *a,
>  })
>
>  /*
> - * Called with rcu_read_lock_bh()
> + * Called with rcu_read_lock()
>   * Because we hold no lock against a writer, its quite possible we fall
>   * in an endless loop.
>   * But every pointer we follow is guaranteed to be valid thanks to RCU.
>   * We exit from this function if number of links exceeds PEER_MAXDEPTH
>   */
> -static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
> -                                       struct inet_peer_base *base)
> +static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
> +                                    struct inet_peer_base *base)
>  {
> -        struct inet_peer *u = rcu_dereference_bh(base->root);
> +        struct inet_peer *u = rcu_dereference(base->root);
>          int count = 0;
>
>          while (u != peer_avl_empty) {
> @@ -231,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
>                          return u;
>                  }
>                  if (cmp == -1)
> -                        u = rcu_dereference_bh(u->avl_left);
> +                        u = rcu_dereference(u->avl_left);
>                  else
> -                        u = rcu_dereference_bh(u->avl_right);
> +                        u = rcu_dereference(u->avl_right);
>                  if (unlikely(++count == PEER_MAXDEPTH))
>                          break;
>          }
> @@ -470,11 +470,11 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
>          /* Look up for the address quickly, lockless.
>           * Because of a concurrent writer, we might not find an existing entry.
>           */
> -        rcu_read_lock_bh();
> +        rcu_read_lock();
>          sequence = read_seqbegin(&base->lock);
> -        p = lookup_rcu_bh(daddr, base);
> +        p = lookup_rcu(daddr, base);
>          invalidated = read_seqretry(&base->lock, sequence);
> -        rcu_read_unlock_bh();
> +        rcu_read_unlock();
>
>          if (p) {
>                  /* The existing node has been found.

David, I am not sure this is safe, since we use call_rcu_bh() when
freeing one item. One cpu could decide to kfree() an item while another
cpu is still using it.

rcu_read_lock_bh() was signalling to other cpus that we were in a softirq
section, so we were delaying a possible kfree().
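A minimal sketch of the flavor mismatch Eric is pointing at, using an illustrative structure rather than the real inet_peer (the struct, variable, and function names below are assumptions, not the inetpeer code): an updater that frees through call_rcu_bh() only waits for rcu_read_lock_bh() readers, so a lookup running under plain rcu_read_lock() is not protected from the deferred kfree().

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative item and slot, not the real struct inet_peer. */
struct item {
        int             key;
        struct rcu_head rcu;
};

static struct item __rcu *slot;

static void item_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}

/* Updater: a call_rcu_bh() grace period only waits for readers inside
 * rcu_read_lock_bh()/rcu_read_unlock_bh() sections.
 */
static void slot_replace(struct item *newp)
{
        struct item *old = rcu_dereference_protected(slot, 1);

        rcu_assign_pointer(slot, newp);
        if (old)
                call_rcu_bh(&old->rcu, item_free_rcu);
}

/* Reader: under plain rcu_read_lock(), as in the patch, the grace period
 * started by call_rcu_bh() above does not wait for us, so the item can be
 * kfree()d while we are still dereferencing it.
 */
static bool slot_lookup(int key)
{
        struct item *p;
        bool hit = false;

        rcu_read_lock();
        p = rcu_dereference(slot);
        if (p && p->key == key)
                hit = true;
        rcu_read_unlock();

        return hit;
}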
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Sun, 13 Mar 2011 11:04:09 +0100

> David, I am not sure this is safe, since we use call_rcu_bh() when
> freeing one item. One cpu could decide to kfree() an item while another
> cpu is still using it.
>
> rcu_read_lock_bh() was signalling to other cpus that we were in a softirq
> section, so we were delaying a possible kfree().

Ok, could we use normal call_rcu() to solve this then?
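If the answer is yes, the change lands on the freeing side rather than the lookup side. A rough sketch of that swap, assuming the inetpeer free path is shaped roughly like this (inetpeer_free_rcu, peer_cachep, and the rcu field are taken as they appear in net/ipv4/inetpeer.c of that era, but treat the exact names as assumptions):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/inetpeer.h>

/* Sketch, not a patch: make the freeing side use the same RCU flavor
 * that the new lookup_rcu() readers use.  peer_cachep is the static
 * kmem_cache inside net/ipv4/inetpeer.c.
 */
static void inetpeer_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void inetpeer_free(struct inet_peer *p)
{
        /* Before: only rcu_read_lock_bh() readers were waited for. */
        /* call_rcu_bh(&p->rcu, inetpeer_free_rcu); */

        /* After: plain rcu_read_lock() readers are waited for too. */
        call_rcu(&p->rcu, inetpeer_free_rcu);
}

Either direction restores the invariant that matters here: the reader sections and the deferred free must use the same RCU flavor.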
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f604ffd..6442c35 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -206,16 +206,16 @@ static int addr_compare(const struct inetpeer_addr *a,
 })

 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
-                                       struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+                                    struct inet_peer_base *base)
 {
-        struct inet_peer *u = rcu_dereference_bh(base->root);
+        struct inet_peer *u = rcu_dereference(base->root);
         int count = 0;

         while (u != peer_avl_empty) {
@@ -231,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
                         return u;
                 }
                 if (cmp == -1)
-                        u = rcu_dereference_bh(u->avl_left);
+                        u = rcu_dereference(u->avl_left);
                 else
-                        u = rcu_dereference_bh(u->avl_right);
+                        u = rcu_dereference(u->avl_right);
                 if (unlikely(++count == PEER_MAXDEPTH))
                         break;
         }
@@ -470,11 +470,11 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
         /* Look up for the address quickly, lockless.
          * Because of a concurrent writer, we might not find an existing entry.
          */
-        rcu_read_lock_bh();
+        rcu_read_lock();
         sequence = read_seqbegin(&base->lock);
-        p = lookup_rcu_bh(daddr, base);
+        p = lookup_rcu(daddr, base);
         invalidated = read_seqretry(&base->lock, sequence);
-        rcu_read_unlock_bh();
+        rcu_read_unlock();

         if (p) {
                 /* The existing node has been found.
If modifications on other cpus are ok, then modifications to
the tree during lookup done by the local cpu are ok too.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 net/ipv4/inetpeer.c |   18 +++++++++---------
 1 files changed, 9 insertions(+), 9 deletions(-)
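The reason concurrent tree modification during the lookup is tolerable is visible in the inet_getpeer() hunk above: the lockless walk is depth-bounded (PEER_MAXDEPTH) and its outcome is cross-checked against the base seqlock, so a racing writer can at worst force the slow path. A stripped-down sketch of that pattern over a simple linked structure (generic names, not the inetpeer AVL code; the real lookup additionally takes a reference on the node before leaving the RCU section):

#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/types.h>

/* Illustrative lockless lookup validated by a seqlock. */
struct node {
        int                     key;
        struct node __rcu       *next;
};

static struct node __rcu *table_head;
static DEFINE_SEQLOCK(table_lock);      /* taken by writers around changes */

#define MAX_WALK        40              /* cf. PEER_MAXDEPTH: bound the walk */

static bool lookup_lockless(int key, bool *invalidated)
{
        struct node *n;
        unsigned int seq;
        int depth = 0;
        bool hit = false;

        rcu_read_lock();
        seq = read_seqbegin(&table_lock);

        /* A writer may reshape the structure while we walk it; every
         * pointer stays valid thanks to RCU, and the depth bound keeps a
         * concurrent rewrite from trapping us in a loop.
         */
        for (n = rcu_dereference(table_head); n; n = rcu_dereference(n->next)) {
                if (n->key == key) {
                        hit = true;
                        break;
                }
                if (++depth == MAX_WALK)
                        break;
        }

        /* A miss is only conclusive if no writer ran during the walk. */
        *invalidated = read_seqretry(&table_lock, seq);

        rcu_read_unlock();
        return hit;
}

A caller would trust a hit immediately, but treat a miss as inconclusive whenever *invalidated is set and fall back to a lookup under the write lock.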