
[3/3] af_key: locking change

Message ID: 20100219220111.868268353@vyatta.com
State: Changes Requested, archived
Delegated to: David Miller

Commit Message

Stephen Hemminger Feb. 19, 2010, 9:59 p.m. UTC
Get rid of the custom locking that used a wait queue, a lock, and an
atomic counter to build what is essentially a queued mutex.  Use RCU
for the read side.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
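
As a rough illustration of the pattern the patch moves to (not the af_key
code itself: the demo_* names are made up, and a plain list_head stands in
for the sk_*_rcu() socket helpers in the diff below), writers serialize on
a mutex while readers only take rcu_read_lock():

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>

static LIST_HEAD(demo_table);
static DEFINE_MUTEX(demo_mutex);

struct demo_entry {
	struct list_head list;
	int value;
};

/* Write side: insertions exclude each other on the mutex but never block
 * readers; list_add_rcu() publishes the entry with the required barriers. */
static void demo_insert(struct demo_entry *e)
{
	mutex_lock(&demo_mutex);
	list_add_rcu(&e->list, &demo_table);
	mutex_unlock(&demo_mutex);
}

/* Read side: a plain rcu_read_lock() section, with no rwlock, no atomic
 * counter and no wait queue, mirroring what pfkey_broadcast() does below. */
static void demo_walk(void)
{
	struct demo_entry *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_table, list)
		pr_debug("entry value %d\n", e->value);
	rcu_read_unlock();
}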

Comments

Eric Dumazet Feb. 20, 2010, 4:07 a.m. UTC | #1
On Friday 19 February 2010 at 13:59 -0800, Stephen Hemminger wrote:
> plain text document attachment (pfkey-rcu.patch)
> Get rid of the custom locking that used a wait queue, a lock, and an
> atomic counter to build what is essentially a queued mutex.  Use RCU
> for the read side.
> 
> Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>

Excellent

Acked-by: Eric Dumazet <eric.dumazet@gmail.com>

I don't know why you used synchronize_sched() instead of
synchronize_rcu().

I prefer the _rcu() variant because it's all about RCU after all, and the
fact that it is mapped to synchronize_sched() is an implementation detail.

(I am not saying the implementation doesn't matter, Paul :) )

>  }
>  
>  static struct proto key_proto = {
> @@ -223,6 +177,7 @@ static int pfkey_release(struct socket *
>  	sock_orphan(sk);
>  	sock->sk = NULL;
>  	skb_queue_purge(&sk->sk_write_queue);
> +	synchronize_sched();
>  	sock_put(sk);
>  
>  	return 0;
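
Continuing the made-up demo_* sketch from above, this is the pairing Eric
is pointing at: synchronize_rcu() is the grace-period primitive that
matches rcu_read_lock()/rcu_read_unlock() readers, whereas
synchronize_sched() waits for preempt-disabled regions and only covers
those readers when the non-preemptible RCU flavour is in use.

/* Write-side removal paired with the rcu_read_lock() readers in
 * demo_walk(): unlink under the mutex, wait for pre-existing readers,
 * then free. */
static void demo_remove(struct demo_entry *e)
{
	mutex_lock(&demo_mutex);
	list_del_rcu(&e->list);		/* readers may still be walking over e */
	mutex_unlock(&demo_mutex);

	synchronize_rcu();		/* every reader that could see e is done */
	kfree(e);			/* now nothing can reference e */
}

In the patch itself the object is a socket, so the final kfree() is
replaced by the reference drop via sock_put() after the grace period in
the pfkey_release() hunk.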


Stephen Hemminger Feb. 20, 2010, 5:53 a.m. UTC | #2
On Sat, 20 Feb 2010 05:07:30 +0100
Eric Dumazet <eric.dumazet@gmail.com> wrote:

> On Friday 19 February 2010 at 13:59 -0800, Stephen Hemminger wrote:
> > plain text document attachment (pfkey-rcu.patch)
> > Get rid of the custom locking that used a wait queue, a lock, and an
> > atomic counter to build what is essentially a queued mutex.  Use RCU
> > for the read side.
> > 
> > Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
> 
> Excellent
> 
> Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
> 
> I don't know why you used synchronize_sched() instead of
> synchronize_rcu().

I'll go back to synchronize_rcu.
David Miller Feb. 20, 2010, 8:51 p.m. UTC | #3
From: Stephen Hemminger <shemminger@vyatta.com>
Date: Fri, 19 Feb 2010 21:53:14 -0800

> On Sat, 20 Feb 2010 05:07:30 +0100
> Eric Dumazet <eric.dumazet@gmail.com> wrote:
> 
>> On Friday 19 February 2010 at 13:59 -0800, Stephen Hemminger wrote:
>> > plain text document attachment (pfkey-rcu.patch)
>> > Get rid of the custom locking that used a wait queue, a lock, and an
>> > atomic counter to build what is essentially a queued mutex.  Use RCU
>> > for the read side.
>> > 
>> > Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
>> 
>> Excellent
>> 
>> Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
>> 
>> I don't know why you used synchronize_sched() instead of
>> synchronize_rcu().
> 
> I'll go back to synchronize_rcu.

Please also add some comments for patch #1 as requested by
Al Viro.

When you post the new series with these changes I'll apply
it to net-next-2.6, thanks!

Patch

--- a/net/key/af_key.c	2010-02-19 13:58:18.185494054 -0800
+++ b/net/key/af_key.c	2010-02-19 13:58:48.805994639 -0800
@@ -41,9 +41,7 @@  struct netns_pfkey {
 	struct hlist_head table;
 	atomic_t socks_nr;
 };
-static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait);
-static DEFINE_RWLOCK(pfkey_table_lock);
-static atomic_t pfkey_table_users = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pfkey_mutex);
 
 struct pfkey_sock {
 	/* struct sock must be the first member of struct pfkey_sock */
@@ -108,50 +106,6 @@  static void pfkey_sock_destruct(struct s
 	atomic_dec(&net_pfkey->socks_nr);
 }
 
-static void pfkey_table_grab(void)
-{
-	write_lock_bh(&pfkey_table_lock);
-
-	if (atomic_read(&pfkey_table_users)) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		add_wait_queue_exclusive(&pfkey_table_wait, &wait);
-		for(;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (atomic_read(&pfkey_table_users) == 0)
-				break;
-			write_unlock_bh(&pfkey_table_lock);
-			schedule();
-			write_lock_bh(&pfkey_table_lock);
-		}
-
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&pfkey_table_wait, &wait);
-	}
-}
-
-static __inline__ void pfkey_table_ungrab(void)
-{
-	write_unlock_bh(&pfkey_table_lock);
-	wake_up(&pfkey_table_wait);
-}
-
-static __inline__ void pfkey_lock_table(void)
-{
-	/* read_lock() synchronizes us to pfkey_table_grab */
-
-	read_lock(&pfkey_table_lock);
-	atomic_inc(&pfkey_table_users);
-	read_unlock(&pfkey_table_lock);
-}
-
-static __inline__ void pfkey_unlock_table(void)
-{
-	if (atomic_dec_and_test(&pfkey_table_users))
-		wake_up(&pfkey_table_wait);
-}
-
-
 static const struct proto_ops pfkey_ops;
 
 static void pfkey_insert(struct sock *sk)
@@ -159,16 +113,16 @@  static void pfkey_insert(struct sock *sk
 	struct net *net = sock_net(sk);
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 
-	pfkey_table_grab();
-	sk_add_node(sk, &net_pfkey->table);
-	pfkey_table_ungrab();
+	mutex_lock(&pfkey_mutex);
+	sk_add_node_rcu(sk, &net_pfkey->table);
+	mutex_unlock(&pfkey_mutex);
 }
 
 static void pfkey_remove(struct sock *sk)
 {
-	pfkey_table_grab();
-	sk_del_node_init(sk);
-	pfkey_table_ungrab();
+	mutex_lock(&pfkey_mutex);
+	sk_del_node_init_rcu(sk);
+	mutex_unlock(&pfkey_mutex);
 }
 
 static struct proto key_proto = {
@@ -223,6 +177,7 @@  static int pfkey_release(struct socket *
 	sock_orphan(sk);
 	sock->sk = NULL;
 	skb_queue_purge(&sk->sk_write_queue);
+	synchronize_sched();
 	sock_put(sk);
 
 	return 0;
@@ -277,8 +232,8 @@  static int pfkey_broadcast(struct sk_buf
 	if (!skb)
 		return -ENOMEM;
 
-	pfkey_lock_table();
-	sk_for_each(sk, node, &net_pfkey->table) {
+	rcu_read_lock();
+	sk_for_each_rcu(sk, node, &net_pfkey->table) {
 		struct pfkey_sock *pfk = pfkey_sk(sk);
 		int err2;
 
@@ -309,7 +264,7 @@  static int pfkey_broadcast(struct sk_buf
 		if ((broadcast_flags & BROADCAST_REGISTERED) && err)
 			err = err2;
 	}
-	pfkey_unlock_table();
+	rcu_read_unlock();
 
 	if (one_sk != NULL)
 		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
@@ -3702,8 +3657,8 @@  static void *pfkey_seq_start(struct seq_
 	struct net *net = seq_file_net(f);
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 
-	read_lock(&pfkey_table_lock);
-	return seq_hlist_start_head(&net_pfkey->table, *ppos);
+	rcu_read_lock();
+	return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
 }
 
 static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
@@ -3711,12 +3666,12 @@  static void *pfkey_seq_next(struct seq_f
 	struct net *net = seq_file_net(f);
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 
-	return seq_hlist_next(v, &net_pfkey->table, ppos);
+	return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
 }
 
 static void pfkey_seq_stop(struct seq_file *f, void *v)
 {
-	read_unlock(&pfkey_table_lock);
+	rcu_read_unlock();
 }
 
 static const struct seq_operations pfkey_seq_ops = {