[2/3] net: Make cleanup_list and net::cleanup_list of llist type

Message ID 151903432575.8021.5189428113212356710.stgit@localhost.localdomain
State Accepted, archived
Delegated to: David Miller
Series net: Get rid of net_mutex and simplify cleanup_list queueing

Commit Message

Kirill Tkhai Feb. 19, 2018, 9:58 a.m. UTC
This simplifies cleanup queueing and makes the cleanup lists
use llist primitives. Since llist has its own cmpxchg()
ordering, cleanup_list_lock is no longer needed.

Also, struct llist_node is smaller than struct list_head,
so this patch saves some bytes in struct net.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 include/net/net_namespace.h |    3 ++-
 net/core/net_namespace.c    |   20 ++++++--------------
 2 files changed, 8 insertions(+), 15 deletions(-)
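
For reference, here is a minimal sketch of the queueing pattern the patch
switches to, using illustrative names (my_obj, my_queue, my_worker) rather
than the actual net/core identifiers: producers push with a lockless
llist_add(), and the worker atomically snapshots the whole list with
llist_del_all() before walking it.

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct my_obj {
	struct llist_node node;		/* replaces list_head + spinlock */
	int id;
};

static LLIST_HEAD(pending);		/* lock-free list head, no spinlock */

static void my_worker(struct work_struct *work);
static DECLARE_WORK(my_work, my_worker);

/* Producer: callable from any context, including hard IRQ. */
static void my_queue(struct my_obj *obj)
{
	llist_add(&obj->node, &pending);	/* lockless cmpxchg() push */
	schedule_work(&my_work);
}

/* Consumer: snapshot everything queued so far, then walk the snapshot. */
static void my_worker(struct work_struct *work)
{
	struct llist_node *kill_list = llist_del_all(&pending);
	struct my_obj *obj, *tmp;

	llist_for_each_entry_safe(obj, tmp, kill_list, node) {
		pr_info("cleaning up object %d\n", obj->id);
		kfree(obj);
	}
}

llist_del_all() swaps the list head with NULL in a single xchg(), so the
snapshot step needs no lock either.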

Comments

Cong Wang Feb. 20, 2018, 7:42 p.m. UTC | #1
On Mon, Feb 19, 2018 at 1:58 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>  void __put_net(struct net *net)
>  {
>         /* Cleanup the network namespace in process context */
> -       unsigned long flags;
> -
> -       spin_lock_irqsave(&cleanup_list_lock, flags);
> -       list_add(&net->cleanup_list, &cleanup_list);
> -       spin_unlock_irqrestore(&cleanup_list_lock, flags);
> -
> +       llist_add(&net->cleanup_list, &cleanup_list);
>         queue_work(netns_wq, &net_cleanup_work);
>  }

Is llist safe against IRQ too?
Kirill Tkhai Feb. 21, 2018, 8:30 a.m. UTC | #2
On 20.02.2018 22:42, Cong Wang wrote:
> On Mon, Feb 19, 2018 at 1:58 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>>  void __put_net(struct net *net)
>>  {
>>         /* Cleanup the network namespace in process context */
>> -       unsigned long flags;
>> -
>> -       spin_lock_irqsave(&cleanup_list_lock, flags);
>> -       list_add(&net->cleanup_list, &cleanup_list);
>> -       spin_unlock_irqrestore(&cleanup_list_lock, flags);
>> -
>> +       llist_add(&net->cleanup_list, &cleanup_list);
>>         queue_work(netns_wq, &net_cleanup_work);
>>  }
> 
> Is llist safe against IRQ too?

Yes, it's safe, and it's aimed at exactly this kind of case. There is
no "locked" state like a spinlock has; there is a single cmpxchg().

You may find examples of its use in the ./kernel directory.

Kirill
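
To make the IRQ-safety point concrete, here is roughly what the push boils
down to (a simplified sketch of the llist add path, not a verbatim copy of
the kernel source): the producer side is a read/cmpxchg retry loop with no
held lock, so an interrupt that races in and pushes its own node merely
forces one retry instead of deadlocking against a "locked" state.

#include <linux/llist.h>

/*
 * Simplified sketch of what the llist push does internally.  If an IRQ
 * handler interrupts it and pushes its own node, the cmpxchg() below
 * fails and the loop simply retries; there is never a held lock for the
 * interrupt to deadlock against.
 */
static inline void sketch_llist_add(struct llist_node *new,
				    struct llist_head *head)
{
	struct llist_node *first;

	do {
		first = READ_ONCE(head->first);
		new->next = first;
	} while (cmpxchg(&head->first, first, new) != first);
}

This is why the spin_lock_irqsave()/spin_unlock_irqrestore() pair in
__put_net() can be dropped entirely.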

Patch

diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 115b01b92f4d..d4417495773a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,12 +59,13 @@  struct net {
 	atomic64_t		cookie_gen;
 
 	struct list_head	list;		/* list of network namespaces */
-	struct list_head	cleanup_list;	/* namespaces on death row */
 	struct list_head	exit_list;	/* To linked to call pernet exit
 						 * methods on dead net (net_sem
 						 * read locked), or to unregister
 						 * pernet ops (net_sem wr locked).
 						 */
+	struct llist_node	cleanup_list;	/* namespaces on death row */
+
 	struct user_namespace   *user_ns;	/* Owning user namespace */
 	struct ucounts		*ucounts;
 	spinlock_t		nsid_lock;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e89a516620dd..abf8a46e94e2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -481,21 +481,18 @@  static void unhash_nsid(struct net *net, struct net *last)
 	spin_unlock_bh(&net->nsid_lock);
 }
 
-static DEFINE_SPINLOCK(cleanup_list_lock);
-static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
+static LLIST_HEAD(cleanup_list);
 
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
 	struct net *net, *tmp, *last;
-	struct list_head net_kill_list;
+	struct llist_node *net_kill_list;
 	LIST_HEAD(net_exit_list);
 	unsigned write;
 
 	/* Atomically snapshot the list of namespaces to cleanup */
-	spin_lock_irq(&cleanup_list_lock);
-	list_replace_init(&cleanup_list, &net_kill_list);
-	spin_unlock_irq(&cleanup_list_lock);
+	net_kill_list = llist_del_all(&cleanup_list);
 again:
 	write = READ_ONCE(nr_sync_pernet_ops);
 	if (write)
@@ -510,7 +507,7 @@  static void cleanup_net(struct work_struct *work)
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list)
+	llist_for_each_entry(net, net_kill_list, cleanup_list)
 		list_del_rcu(&net->list);
 	/* Cache last net. After we unlock rtnl, no one new net
 	 * added to net_namespace_list can assign nsid pointer
@@ -525,7 +522,7 @@  static void cleanup_net(struct work_struct *work)
 	last = list_last_entry(&net_namespace_list, struct net, list);
 	rtnl_unlock();
 
-	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 		unhash_nsid(net, last);
 		list_add_tail(&net->exit_list, &net_exit_list);
 	}
@@ -585,12 +582,7 @@  static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
-	unsigned long flags;
-
-	spin_lock_irqsave(&cleanup_list_lock, flags);
-	list_add(&net->cleanup_list, &cleanup_list);
-	spin_unlock_irqrestore(&cleanup_list_lock, flags);
-
+	llist_add(&net->cleanup_list, &cleanup_list);
 	queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);