
[net-next] rds: tcp: must use spin_lock_irq* and not spin_lock_bh with rds_tcp_conn_lock

Message ID 1521111266-148947-1-git-send-email-sowmini.varadhan@oracle.com
State Accepted, archived
Delegated to: David Miller

Commit Message

Sowmini Varadhan March 15, 2018, 10:54 a.m. UTC
rds_tcp_connection allocation/free management can be called from
__rds_conn_create() after IRQs have been disabled, so spin_[un]lock_bh
cannot be used with rds_tcp_conn_lock.

Bottom halves that need to synchronize with critical sections protected
by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly.

Reported-by: syzbot+c68e51bb5e699d3f8d91@syzkaller.appspotmail.com
Fixes: ebeeb1ad9b8a ("rds: tcp: use rds_destroy_pending() to synchronize
       netns/module teardown and rds connection/workq management")
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
---
 net/rds/tcp.c |   17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
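
The crux of the fix is the lock flavor: spin_unlock_bh() ends with
local_bh_enable(), which must not be called with hardirqs disabled (it may
run pending softirqs on the spot), so the _bh variants are unusable once a
caller such as __rds_conn_create() has already turned IRQs off. Below is a
minimal sketch of the contrast, not taken from the patch; example_lock and
the function names are illustrative stand-ins:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for rds_tcp_conn_lock */

static void broken_free_path(void)
{
	/*
	 * BROKEN if IRQs are already disabled: spin_unlock_bh() calls
	 * local_bh_enable(), which must not run with hardirqs off.
	 */
	spin_lock_bh(&example_lock);
	/* ... list_del() of the connection node ... */
	spin_unlock_bh(&example_lock);
}

static void safe_free_path(void)
{
	unsigned long flags;

	/*
	 * Safe from any context: save the caller's IRQ state, disable
	 * IRQs across the critical section, then restore exactly the
	 * state the caller had.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... list_del() of the connection node ... */
	spin_unlock_irqrestore(&example_lock, flags);
}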

Comments

Santosh Shilimkar March 15, 2018, 5:52 p.m. UTC | #1
On 3/15/2018 3:54 AM, Sowmini Varadhan wrote:
> rds_tcp_connection allocation/free management can be called from
> __rds_conn_create() after IRQs have been disabled, so spin_[un]lock_bh
> cannot be used with rds_tcp_conn_lock.
> 
> Bottom halves that need to synchronize with critical sections protected
> by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly.
> 
> Reported-by: syzbot+c68e51bb5e699d3f8d91@syzkaller.appspotmail.com
> Fixes: ebeeb1ad9b8a ("rds: tcp: use rds_destroy_pending() to synchronize
>        netns/module teardown and rds connection/workq management")
> Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
> ---
Thanks Sowmini for the WARN_ON() discussion off-list.

Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
David Miller March 17, 2018, 9:19 p.m. UTC | #2
From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Date: Thu, 15 Mar 2018 03:54:26 -0700

> rds_tcp_connection allocation/free management can be called from
> __rds_conn_create() after IRQs have been disabled, so spin_[un]lock_bh
> cannot be used with rds_tcp_conn_lock.
> 
> Bottom halves that need to synchronize with critical sections protected
> by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly.
> 
> Reported-by: syzbot+c68e51bb5e699d3f8d91@syzkaller.appspotmail.com
> Fixes: ebeeb1ad9b8a ("rds: tcp: use rds_destroy_pending() to synchronize
>        netns/module teardown and rds connection/workq management")
> Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>

Applied, thank you.

Patch

diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index eb04e7f..08ea9cd 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -272,13 +272,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
 
 	rdsdebug("freeing tc %p\n", tc);
 
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
 	if (!tc->t_tcp_node_detached)
 		list_del(&tc->t_tcp_node);
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
 
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
@@ -308,13 +309,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
 			 conn->c_path[i].cp_transport_data);
 	}
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irq(&rds_tcp_conn_lock);
 	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
 		tc = conn->c_path[i].cp_transport_data;
 		tc->t_tcp_node_detached = false;
 		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
 	}
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irq(&rds_tcp_conn_lock);
 fail:
 	if (ret) {
 		for (j = 0; j < i; j++)
@@ -527,7 +528,7 @@ static void rds_tcp_kill_sock(struct net *net)
 
 	rtn->rds_tcp_listen_sock = NULL;
 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
@@ -540,7 +541,7 @@ static void rds_tcp_kill_sock(struct net *net)
 			tc->t_tcp_node_detached = true;
 		}
 	}
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
 		rds_conn_destroy(tc->t_cpath->cp_conn);
 }
@@ -588,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 {
 	struct rds_tcp_connection *tc, *_tc;
 
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
@@ -598,7 +599,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 		/* reconnect with new parameters */
 		rds_conn_path_drop(tc->t_cpath, false);
 	}
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irq(&rds_tcp_conn_lock);
 }
 
 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
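
Note that the patch uses two different primitives: rds_tcp_conn_free()
takes spin_lock_irqsave()/spin_unlock_irqrestore() because its caller's
IRQ state is unknown, while the alloc, kill_sock and sysctl_reset paths
use plain spin_lock_irq()/spin_unlock_irq(), which re-enables IRQs
unconditionally on unlock and is therefore only safe where IRQs are known
to be enabled on entry (process context). A short sketch of that cheaper
pattern, again with an illustrative lock name:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for rds_tcp_conn_lock */

/* Process context only; IRQs must be enabled on entry. */
static void process_context_walk(void)
{
	spin_lock_irq(&example_lock);	/* disables IRQs unconditionally */
	/* ... walk rds_tcp_conn_list ... */
	spin_unlock_irq(&example_lock);	/* re-enables IRQs unconditionally */
}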